2024-12-09 13:29:19,921 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad 2024-12-09 13:29:19,963 main DEBUG Took 0.030651 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-09 13:29:19,964 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-09 13:29:19,965 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-09 13:29:19,966 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-09 13:29:19,968 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 13:29:19,987 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-09 13:29:20,036 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 13:29:20,044 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 13:29:20,045 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 13:29:20,045 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 13:29:20,046 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 13:29:20,047 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 13:29:20,048 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 13:29:20,048 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 13:29:20,049 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 13:29:20,050 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 13:29:20,063 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 13:29:20,063 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 13:29:20,064 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 13:29:20,064 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
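[Editor's note] The LoggerConfig builders above show Log4j 2 reading per-logger level overrides out of the test jar's log4j2.properties. That file is not reproduced in this log, so the snippet below is only a sketch of how overrides such as org.apache.zookeeper at ERROR and org.apache.directory at WARN with additivity disabled are typically declared in the properties format; the "zookeeper" and "directory" ids are arbitrary labels, not taken from the real configuration.

    # Illustrative sketch only -- not the actual HBase test configuration.
    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = ERROR

    logger.directory.name = org.apache.directory
    logger.directory.level = WARN
    # additivity=false keeps these events from also flowing up to the root logger.
    logger.directory.additivity = false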
2024-12-09 13:29:20,065 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 13:29:20,065 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 13:29:20,066 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 13:29:20,066 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 13:29:20,067 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 13:29:20,067 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 13:29:20,068 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 13:29:20,069 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 13:29:20,071 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 13:29:20,071 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 13:29:20,074 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 13:29:20,074 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-09 13:29:20,084 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 13:29:20,086 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-09 13:29:20,093 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-09 13:29:20,101 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
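[Editor's note] The RootLogger builder's levelAndRefs="INFO,Console" means any logger without an explicit override inherits INFO and forwards to the appender named Console; that appender's PatternLayout and SYSTEM_ERR target are built in the entries that follow. Sketched in the same properties format, and using the stock Console appender type rather than HBase's custom HBaseTestAppender, the equivalent wiring would look roughly like this:

    # Illustrative sketch only; the test jar actually builds an HBaseTestAppender.
    appender.console.type = Console
    appender.console.name = Console
    appender.console.target = SYSTEM_ERR
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n

    rootLogger.level = INFO
    rootLogger.appenderRef.console.ref = Console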
2024-12-09 13:29:20,106 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-09 13:29:20,106 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-09 13:29:20,118 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-09 13:29:20,136 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-09 13:29:20,143 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-09 13:29:20,144 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-09 13:29:20,151 main DEBUG createAppenders(={Console}) 2024-12-09 13:29:20,152 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad initialized 2024-12-09 13:29:20,153 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad 2024-12-09 13:29:20,153 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad OK. 2024-12-09 13:29:20,154 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-09 13:29:20,155 main DEBUG OutputStream closed 2024-12-09 13:29:20,155 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-09 13:29:20,155 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-09 13:29:20,156 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@7c711375 OK 2024-12-09 13:29:20,313 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-09 13:29:20,316 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-09 13:29:20,318 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-09 13:29:20,319 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-09 13:29:20,320 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-09 13:29:20,321 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-09 13:29:20,321 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-09 13:29:20,322 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-09 13:29:20,322 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-09 13:29:20,323 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-09 13:29:20,323 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-09 13:29:20,324 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-09 13:29:20,325 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-09 13:29:20,325 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-09 13:29:20,325 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-09 13:29:20,326 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-09 13:29:20,326 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-09 13:29:20,328 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-09 13:29:20,331 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-09 13:29:20,331 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@554e218) with optional ClassLoader: null 2024-12-09 13:29:20,332 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-09 13:29:20,333 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@554e218] started OK. 2024-12-09T13:29:20,364 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-12-09 13:29:20,368 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-09 13:29:20,369 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-09T13:29:20,894 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4 2024-12-09T13:29:20,896 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestSecureExportSnapshot timeout: 13 mins 2024-12-09T13:29:20,979 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-12-09T13:29:21,218 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T13:29:21,237 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c, deleteOnExit=true 2024-12-09T13:29:21,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T13:29:21,239 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/test.cache.data in system properties and HBase conf 2024-12-09T13:29:21,240 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T13:29:21,240 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop.log.dir in system properties and HBase conf 2024-12-09T13:29:21,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T13:29:21,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T13:29:21,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T13:29:21,352 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T13:29:21,356 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T13:29:21,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T13:29:21,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T13:29:21,358 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T13:29:21,359 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T13:29:21,359 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T13:29:21,360 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T13:29:21,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T13:29:21,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T13:29:21,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/nfs.dump.dir in system properties and HBase conf 2024-12-09T13:29:21,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/java.io.tmpdir in system properties and HBase conf 2024-12-09T13:29:21,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T13:29:21,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T13:29:21,364 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T13:29:22,705 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-09T13:29:22,806 INFO [Time-limited test {}] log.Log(170): Logging initialized @3975ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-09T13:29:22,891 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T13:29:22,991 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T13:29:23,053 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T13:29:23,053 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T13:29:23,056 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T13:29:23,080 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T13:29:23,086 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@654c02d1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop.log.dir/,AVAILABLE} 2024-12-09T13:29:23,087 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cd6ab6b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T13:29:23,358 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@12351f7e{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/java.io.tmpdir/jetty-localhost-34039-hadoop-hdfs-3_4_1-tests_jar-_-any-1364893239861234481/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T13:29:23,375 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@188d52a2{HTTP/1.1, (http/1.1)}{localhost:34039} 2024-12-09T13:29:23,376 INFO [Time-limited test {}] server.Server(415): Started @4546ms 2024-12-09T13:29:23,987 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T13:29:23,994 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T13:29:24,000 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T13:29:24,000 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T13:29:24,001 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T13:29:24,002 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4521559e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop.log.dir/,AVAILABLE} 2024-12-09T13:29:24,002 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3622d218{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T13:29:24,110 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@74bb782c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/java.io.tmpdir/jetty-localhost-43791-hadoop-hdfs-3_4_1-tests_jar-_-any-11030453972724863774/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T13:29:24,111 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7ea2aa50{HTTP/1.1, (http/1.1)}{localhost:43791} 2024-12-09T13:29:24,112 INFO [Time-limited test {}] server.Server(415): Started @5282ms 2024-12-09T13:29:24,182 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T13:29:24,344 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T13:29:24,359 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T13:29:24,369 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T13:29:24,369 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T13:29:24,370 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T13:29:24,371 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32a76e2d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop.log.dir/,AVAILABLE} 2024-12-09T13:29:24,372 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1de9333b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T13:29:24,513 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@8fd4906{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/java.io.tmpdir/jetty-localhost-40739-hadoop-hdfs-3_4_1-tests_jar-_-any-4641981039911020303/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T13:29:24,516 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@600292e3{HTTP/1.1, (http/1.1)}{localhost:40739} 2024-12-09T13:29:24,516 INFO [Time-limited test {}] server.Server(415): Started @5686ms 2024-12-09T13:29:24,519 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T13:29:24,582 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T13:29:24,587 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T13:29:24,588 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T13:29:24,588 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T13:29:24,589 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T13:29:24,589 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2de28195{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop.log.dir/,AVAILABLE} 2024-12-09T13:29:24,590 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@108f4b55{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T13:29:24,726 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@474673d3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/java.io.tmpdir/jetty-localhost-40615-hadoop-hdfs-3_4_1-tests_jar-_-any-10769298625707184588/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T13:29:24,726 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5efaea97{HTTP/1.1, (http/1.1)}{localhost:40615} 2024-12-09T13:29:24,727 INFO [Time-limited test {}] server.Server(415): Started @5897ms 2024-12-09T13:29:24,729 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
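[Editor's note] At this point the test utility has brought up the NameNode web UI and three DataNode web contexts, matching the numDataNodes=3 (alongside numMasters=1, numRegionServers=3, numZkServers=1) requested in the StartMiniClusterOption logged earlier. Below is a minimal sketch of how a test typically requests such a cluster, assuming the HBaseTestingUtil and StartMiniClusterOption classes named in this log are on the classpath; the builder method names mirror the option's logged fields and are an assumption, not copied from the test source.

    // Sketch only: mirrors the StartMiniClusterOption values seen in the log above.
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)          // one HMaster
            .numRegionServers(3)    // three region servers
            .numDataNodes(3)        // three HDFS data nodes
            .numZkServers(1)        // one mini ZooKeeper server
            .build();
        util.startMiniCluster(option);   // starts DFS, ZooKeeper, master and region servers
        try {
          // ... exercise the cluster, e.g. via util.getConnection() ...
        } finally {
          util.shutdownMiniCluster();    // tear everything down and delete test dirs
        }
      }
    }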
2024-12-09T13:29:25,768 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data1/current/BP-67561561-172.17.0.2-1733750962175/current, will proceed with Du for space computation calculation, 2024-12-09T13:29:25,768 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data2/current/BP-67561561-172.17.0.2-1733750962175/current, will proceed with Du for space computation calculation, 2024-12-09T13:29:25,768 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data3/current/BP-67561561-172.17.0.2-1733750962175/current, will proceed with Du for space computation calculation, 2024-12-09T13:29:25,768 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data4/current/BP-67561561-172.17.0.2-1733750962175/current, will proceed with Du for space computation calculation, 2024-12-09T13:29:25,829 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T13:29:25,829 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T13:29:25,887 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1c2015aaa6d953ef with lease ID 0xc6a5883b00aa331a: Processing first storage report for DS-02dae94e-eebd-48dc-9af8-3635b1fd49f9 from datanode DatanodeRegistration(127.0.0.1:42277, datanodeUuid=b6b1635a-69ea-425d-be53-eee8bef8009a, infoPort=34425, infoSecurePort=0, ipcPort=37147, storageInfo=lv=-57;cid=testClusterID;nsid=218601224;c=1733750962175) 2024-12-09T13:29:25,889 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1c2015aaa6d953ef with lease ID 0xc6a5883b00aa331a: from storage DS-02dae94e-eebd-48dc-9af8-3635b1fd49f9 node DatanodeRegistration(127.0.0.1:42277, datanodeUuid=b6b1635a-69ea-425d-be53-eee8bef8009a, infoPort=34425, infoSecurePort=0, ipcPort=37147, storageInfo=lv=-57;cid=testClusterID;nsid=218601224;c=1733750962175), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T13:29:25,889 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd7df9ab089575b7c with lease ID 0xc6a5883b00aa331b: Processing first storage report for DS-f429e652-0d3e-448e-8297-4199c22c018a from datanode DatanodeRegistration(127.0.0.1:39425, datanodeUuid=18a88ea6-5588-4239-b493-08d886227bd8, infoPort=42881, infoSecurePort=0, ipcPort=35267, storageInfo=lv=-57;cid=testClusterID;nsid=218601224;c=1733750962175) 2024-12-09T13:29:25,889 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd7df9ab089575b7c with lease ID 0xc6a5883b00aa331b: from storage DS-f429e652-0d3e-448e-8297-4199c22c018a node DatanodeRegistration(127.0.0.1:39425, datanodeUuid=18a88ea6-5588-4239-b493-08d886227bd8, infoPort=42881, infoSecurePort=0, ipcPort=35267, storageInfo=lv=-57;cid=testClusterID;nsid=218601224;c=1733750962175), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T13:29:25,889 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1c2015aaa6d953ef with lease ID 0xc6a5883b00aa331a: Processing first storage report for DS-c96e4826-2cea-44c5-8377-6d61c19a2e4d from datanode DatanodeRegistration(127.0.0.1:42277, datanodeUuid=b6b1635a-69ea-425d-be53-eee8bef8009a, infoPort=34425, infoSecurePort=0, ipcPort=37147, storageInfo=lv=-57;cid=testClusterID;nsid=218601224;c=1733750962175) 2024-12-09T13:29:25,889 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1c2015aaa6d953ef with lease ID 0xc6a5883b00aa331a: from storage DS-c96e4826-2cea-44c5-8377-6d61c19a2e4d node DatanodeRegistration(127.0.0.1:42277, datanodeUuid=b6b1635a-69ea-425d-be53-eee8bef8009a, infoPort=34425, infoSecurePort=0, ipcPort=37147, storageInfo=lv=-57;cid=testClusterID;nsid=218601224;c=1733750962175), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T13:29:25,890 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd7df9ab089575b7c with lease ID 0xc6a5883b00aa331b: Processing first storage report for DS-3489b39b-9c3b-47e5-af01-163609fe568e from datanode DatanodeRegistration(127.0.0.1:39425, datanodeUuid=18a88ea6-5588-4239-b493-08d886227bd8, infoPort=42881, infoSecurePort=0, ipcPort=35267, storageInfo=lv=-57;cid=testClusterID;nsid=218601224;c=1733750962175) 2024-12-09T13:29:25,890 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xd7df9ab089575b7c with lease ID 0xc6a5883b00aa331b: from storage DS-3489b39b-9c3b-47e5-af01-163609fe568e node DatanodeRegistration(127.0.0.1:39425, datanodeUuid=18a88ea6-5588-4239-b493-08d886227bd8, infoPort=42881, infoSecurePort=0, ipcPort=35267, storageInfo=lv=-57;cid=testClusterID;nsid=218601224;c=1733750962175), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T13:29:25,920 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data5/current/BP-67561561-172.17.0.2-1733750962175/current, will proceed with Du for space computation calculation, 2024-12-09T13:29:25,920 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data6/current/BP-67561561-172.17.0.2-1733750962175/current, will proceed with Du for space computation calculation, 2024-12-09T13:29:25,947 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T13:29:25,953 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc2e70195b4634620 with lease ID 0xc6a5883b00aa331c: Processing first storage report for DS-1c81e62a-c5b9-4b3f-97d4-2b43babdecac from datanode DatanodeRegistration(127.0.0.1:33239, datanodeUuid=cd327201-52fd-46ac-a68b-2f1a18c83819, infoPort=42461, infoSecurePort=0, ipcPort=42289, storageInfo=lv=-57;cid=testClusterID;nsid=218601224;c=1733750962175) 2024-12-09T13:29:25,953 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc2e70195b4634620 with lease ID 0xc6a5883b00aa331c: from storage DS-1c81e62a-c5b9-4b3f-97d4-2b43babdecac node DatanodeRegistration(127.0.0.1:33239, datanodeUuid=cd327201-52fd-46ac-a68b-2f1a18c83819, infoPort=42461, infoSecurePort=0, ipcPort=42289, storageInfo=lv=-57;cid=testClusterID;nsid=218601224;c=1733750962175), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T13:29:25,953 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc2e70195b4634620 with lease ID 0xc6a5883b00aa331c: Processing first storage report for DS-c691e9a6-da92-48fc-a557-44c883cdbaad from datanode DatanodeRegistration(127.0.0.1:33239, datanodeUuid=cd327201-52fd-46ac-a68b-2f1a18c83819, infoPort=42461, infoSecurePort=0, ipcPort=42289, storageInfo=lv=-57;cid=testClusterID;nsid=218601224;c=1733750962175) 2024-12-09T13:29:25,954 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc2e70195b4634620 with lease ID 0xc6a5883b00aa331c: from storage DS-c691e9a6-da92-48fc-a557-44c883cdbaad node DatanodeRegistration(127.0.0.1:33239, datanodeUuid=cd327201-52fd-46ac-a68b-2f1a18c83819, infoPort=42461, infoSecurePort=0, ipcPort=42289, storageInfo=lv=-57;cid=testClusterID;nsid=218601224;c=1733750962175), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T13:29:26,022 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4 2024-12-09T13:29:26,120 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/zookeeper_0, clientPort=57683, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T13:29:26,131 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57683 2024-12-09T13:29:26,146 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T13:29:26,150 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T13:29:26,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741825_1001 (size=7) 2024-12-09T13:29:26,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741825_1001 (size=7) 2024-12-09T13:29:26,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741825_1001 (size=7) 2024-12-09T13:29:26,888 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 with version=8 2024-12-09T13:29:26,889 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/hbase-staging 2024-12-09T13:29:27,003 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-09T13:29:27,225 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c48e4b8eb196:0 server-side Connection retries=45 2024-12-09T13:29:27,236 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T13:29:27,236 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T13:29:27,241 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T13:29:27,241 INFO [Time-limited test {}] ipc.RpcExecutor(188): 
Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T13:29:27,242 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T13:29:27,414 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T13:29:27,489 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-09T13:29:27,500 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-09T13:29:27,505 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T13:29:27,530 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 119998 (auto-detected) 2024-12-09T13:29:27,531 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-09T13:29:27,555 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45049 2024-12-09T13:29:27,589 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45049 connecting to ZooKeeper ensemble=127.0.0.1:57683 2024-12-09T13:29:27,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:450490x0, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T13:29:27,731 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45049-0x1000ad205eb0000 connected 2024-12-09T13:29:27,881 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T13:29:27,885 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T13:29:27,899 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T13:29:27,903 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7, hbase.cluster.distributed=false 2024-12-09T13:29:27,945 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T13:29:27,950 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45049 2024-12-09T13:29:27,952 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45049 2024-12-09T13:29:27,955 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, 
numCallQueues=1, port=45049 2024-12-09T13:29:27,959 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45049 2024-12-09T13:29:27,960 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45049 2024-12-09T13:29:28,080 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c48e4b8eb196:0 server-side Connection retries=45 2024-12-09T13:29:28,081 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T13:29:28,082 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T13:29:28,082 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T13:29:28,082 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T13:29:28,082 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T13:29:28,085 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T13:29:28,088 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T13:29:28,089 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44683 2024-12-09T13:29:28,091 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44683 connecting to ZooKeeper ensemble=127.0.0.1:57683 2024-12-09T13:29:28,092 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T13:29:28,096 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T13:29:28,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:446830x0, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T13:29:28,114 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:446830x0, quorum=127.0.0.1:57683, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T13:29:28,115 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44683-0x1000ad205eb0001 connected 2024-12-09T13:29:28,120 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T13:29:28,130 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, 
evictRemainRatio=0.5 2024-12-09T13:29:28,133 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T13:29:28,139 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T13:29:28,142 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44683 2024-12-09T13:29:28,142 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44683 2024-12-09T13:29:28,143 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44683 2024-12-09T13:29:28,147 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44683 2024-12-09T13:29:28,147 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44683 2024-12-09T13:29:28,164 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c48e4b8eb196:0 server-side Connection retries=45 2024-12-09T13:29:28,164 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T13:29:28,164 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T13:29:28,165 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T13:29:28,165 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T13:29:28,165 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T13:29:28,165 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T13:29:28,166 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T13:29:28,167 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36493 2024-12-09T13:29:28,170 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36493 connecting to ZooKeeper ensemble=127.0.0.1:57683 2024-12-09T13:29:28,171 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T13:29:28,173 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do 
block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T13:29:28,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:364930x0, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T13:29:28,195 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36493-0x1000ad205eb0002 connected 2024-12-09T13:29:28,196 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T13:29:28,196 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T13:29:28,202 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T13:29:28,203 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T13:29:28,205 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T13:29:28,212 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36493 2024-12-09T13:29:28,212 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36493 2024-12-09T13:29:28,213 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36493 2024-12-09T13:29:28,219 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36493 2024-12-09T13:29:28,221 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36493 2024-12-09T13:29:28,236 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c48e4b8eb196:0 server-side Connection retries=45 2024-12-09T13:29:28,236 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T13:29:28,236 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T13:29:28,237 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T13:29:28,237 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T13:29:28,237 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T13:29:28,237 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T13:29:28,237 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T13:29:28,238 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39211 2024-12-09T13:29:28,240 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39211 connecting to ZooKeeper ensemble=127.0.0.1:57683 2024-12-09T13:29:28,241 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T13:29:28,244 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T13:29:28,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:392110x0, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T13:29:28,255 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:392110x0, quorum=127.0.0.1:57683, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T13:29:28,256 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T13:29:28,259 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39211-0x1000ad205eb0003 connected 2024-12-09T13:29:28,264 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T13:29:28,265 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T13:29:28,267 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T13:29:28,271 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39211 2024-12-09T13:29:28,271 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39211 2024-12-09T13:29:28,272 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39211 2024-12-09T13:29:28,274 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39211 2024-12-09T13:29:28,275 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39211 2024-12-09T13:29:28,295 DEBUG [M:0;c48e4b8eb196:45049 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c48e4b8eb196:45049 2024-12-09T13:29:28,296 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c48e4b8eb196,45049,1733750967073 2024-12-09T13:29:28,314 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T13:29:28,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T13:29:28,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T13:29:28,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T13:29:28,317 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c48e4b8eb196,45049,1733750967073 2024-12-09T13:29:28,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T13:29:28,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T13:29:28,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T13:29:28,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:28,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:28,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:28,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:28,380 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T13:29:28,381 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c48e4b8eb196,45049,1733750967073 from backup master directory 2024-12-09T13:29:28,388 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T13:29:28,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T13:29:28,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c48e4b8eb196,45049,1733750967073 2024-12-09T13:29:28,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T13:29:28,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T13:29:28,389 WARN [master/c48e4b8eb196:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T13:29:28,389 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c48e4b8eb196,45049,1733750967073 2024-12-09T13:29:28,392 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-09T13:29:28,393 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-09T13:29:28,454 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/hbase.id] with ID: 93ce3e14-a429-4982-9e09-44291b12e9d6 2024-12-09T13:29:28,455 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.tmp/hbase.id 2024-12-09T13:29:28,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741826_1002 (size=42) 2024-12-09T13:29:28,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741826_1002 (size=42) 2024-12-09T13:29:28,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741826_1002 (size=42) 2024-12-09T13:29:28,471 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.tmp/hbase.id]:[hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/hbase.id] 2024-12-09T13:29:28,521 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] fs.HFileSystem(339): Added 
intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T13:29:28,524 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T13:29:28,542 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms. 2024-12-09T13:29:28,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:28,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:28,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:28,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:28,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741827_1003 (size=196) 2024-12-09T13:29:28,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741827_1003 (size=196) 2024-12-09T13:29:28,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741827_1003 (size=196) 2024-12-09T13:29:28,600 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T13:29:28,602 INFO 
[master/c48e4b8eb196:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-09T13:29:28,616 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
    at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.lang.Class.forName0(Native Method) ~[?:?]
    at java.lang.Class.forName(Class.java:375) ~[?:?]
    at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
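The NoSuchMethodException above is expected probe noise rather than a failure: the SASL helper reflects on DFSClient to see whether the running Hadoop carries HDFS-12396 and, when the method is missing, falls back to the pre-HDFS-12396 crypto helper. A minimal sketch of that reflection-based feature detection, with an illustrative class and method only (this is not the actual HBase helper):

    import java.lang.reflect.Method;

    public class FeatureProbe {
        // Illustrative sketch: look for an optional method and pick a code path
        // based on whether it exists, instead of failing startup.
        static Method findOptionalMethod(Class<?> owner, String name, Class<?>... params) {
            try {
                return owner.getDeclaredMethod(name, params); // present on newer versions
            } catch (NoSuchMethodException e) {
                return null;                                  // absent: use the fallback path
            }
        }

        public static void main(String[] args) {
            // Probing String for a method it does not have stands in for probing DFSClient.
            Method m = findOptionalMethod(String.class, "decryptEncryptedDataEncryptionKey");
            System.out.println(m == null
                ? "method absent -> pre-HDFS-12396 code path"
                : "method present -> HDFS-12396 code path");
        }
    }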
2024-12-09T13:29:28,621 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T13:29:28,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741828_1004 (size=1189) 2024-12-09T13:29:28,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741828_1004 (size=1189) 2024-12-09T13:29:28,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741828_1004 (size=1189) 2024-12-09T13:29:28,682 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/MasterData/data/master/store 2024-12-09T13:29:28,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741829_1005 (size=34) 2024-12-09T13:29:28,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741829_1005 (size=34) 2024-12-09T13:29:28,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741829_1005 (size=34) 2024-12-09T13:29:28,715 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
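The StoreHotnessProtector message above spells out its own switch: the protector stays off until hbase.region.store.parallel.put.limit is set to a positive value. A small sketch of enabling it programmatically; the value 10 is an arbitrary example, not a recommendation:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableHotnessProtector {
        public static void main(String[] args) {
            // A positive parallel-put limit turns the protector on, per the log message.
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.region.store.parallel.put.limit", 10);
            System.out.println(conf.getInt("hbase.region.store.parallel.put.limit", 0)); // 10
        }
    }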
2024-12-09T13:29:28,718 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:29:28,719 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T13:29:28,719 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T13:29:28,720 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T13:29:28,721 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T13:29:28,721 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T13:29:28,721 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T13:29:28,722 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733750968719Disabling compacts and flushes for region at 1733750968719Disabling writes for close at 1733750968721 (+2 ms)Writing region close event to WAL at 1733750968721Closed at 1733750968721 2024-12-09T13:29:28,724 WARN [master/c48e4b8eb196:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/MasterData/data/master/store/.initializing 2024-12-09T13:29:28,724 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/MasterData/WALs/c48e4b8eb196,45049,1733750967073 2024-12-09T13:29:28,732 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T13:29:28,746 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c48e4b8eb196%2C45049%2C1733750967073, suffix=, logDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/MasterData/WALs/c48e4b8eb196,45049,1733750967073, archiveDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/MasterData/oldWALs, maxLogs=10 2024-12-09T13:29:28,771 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/MasterData/WALs/c48e4b8eb196,45049,1733750967073/c48e4b8eb196%2C45049%2C1733750967073.1733750968752, exclude list is [], retry=0 2024-12-09T13:29:28,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33239,DS-1c81e62a-c5b9-4b3f-97d4-2b43babdecac,DISK] 
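The WAL configuration line above is self-consistent: rollsize is the block size scaled by the log-roll multiplier (0.5 in stock HBase), so 256 MB x 0.5 = 128 MB, with at most 10 WAL files kept before a forced flush. A sketch of that arithmetic; the property names are assumptions based on stock HBase defaults and do not appear in the log itself:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollMath {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            long blockSize = 256L * 1024 * 1024;  // blocksize=256 MB reported in the log
            // Assumed property names; only the resulting values are shown in the log.
            float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
            int maxLogs = conf.getInt("hbase.regionserver.maxlogs", 10);
            long rollSize = (long) (blockSize * multiplier);  // 134217728, i.e. rollsize=128 MB
            System.out.println("rollSize=" + rollSize + ", maxLogs=" + maxLogs);
        }
    }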
2024-12-09T13:29:28,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39425,DS-f429e652-0d3e-448e-8297-4199c22c018a,DISK] 2024-12-09T13:29:28,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42277,DS-02dae94e-eebd-48dc-9af8-3635b1fd49f9,DISK] 2024-12-09T13:29:28,789 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-09T13:29:28,822 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/MasterData/WALs/c48e4b8eb196,45049,1733750967073/c48e4b8eb196%2C45049%2C1733750967073.1733750968752 2024-12-09T13:29:28,823 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34425:34425),(127.0.0.1/127.0.0.1:42881:42881),(127.0.0.1/127.0.0.1:42461:42461)] 2024-12-09T13:29:28,824 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T13:29:28,824 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:29:28,828 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T13:29:28,829 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T13:29:28,864 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T13:29:28,889 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T13:29:28,892 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:28,895 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T13:29:28,896 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T13:29:28,899 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T13:29:28,900 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:28,901 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:29:28,901 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T13:29:28,904 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T13:29:28,904 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:28,906 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 
{}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:29:28,906 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T13:29:28,909 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T13:29:28,910 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:28,911 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:29:28,912 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T13:29:28,917 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T13:29:28,918 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T13:29:28,924 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T13:29:28,925 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T13:29:28,928 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
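The "(32.0 M)" fallback in the FlushLargeStoresPolicy line follows from numbers already present in this log: the master:store table has four column families (info, proc, rs, state) and a flush size of 134217728 bytes, and with no per-family lower bound configured the policy divides the flush size by the family count. A quick check of that arithmetic, matching the flushSizeLowerBound=33554432 reported a few lines later:

    public class FlushLowerBound {
        public static void main(String[] args) {
            long memstoreFlushSize = 134_217_728L; // flushSize from the log (128 MB)
            int families = 4;                      // info, proc, rs, state
            long lowerBound = memstoreFlushSize / families;
            System.out.println(lowerBound);        // 33554432, i.e. 32.0 M
        }
    }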
2024-12-09T13:29:28,932 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T13:29:28,937 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:29:28,938 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69718588, jitterRate=0.03888791799545288}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T13:29:28,944 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733750968841Initializing all the Stores at 1733750968843 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733750968844 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733750968845 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733750968845Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733750968845Cleaning up temporary data from old regions at 1733750968925 (+80 ms)Region opened successfully at 1733750968944 (+19 ms) 2024-12-09T13:29:28,945 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T13:29:28,981 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e461c67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c48e4b8eb196/172.17.0.2:0 2024-12-09T13:29:29,016 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
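The odd-looking desiredMaxFileSize=69718588 in the split policy line above is a jittered threshold: it is consistent with a 64 MiB base (67108864 bytes) scaled by (1 + jitterRate), which is how ConstantSizeRegionSplitPolicy spreads out split points so regions do not all split at exactly the same size. A quick check; the 64 MiB base is inferred from the numbers, not stated in the log:

    public class JitteredSplitSize {
        public static void main(String[] args) {
            long base = 64L * 1024 * 1024;           // assumed 67108864-byte base
            double jitterRate = 0.03888791799545288; // jitterRate from the log
            long desired = base + Math.round(base * jitterRate);
            System.out.println(desired);             // 69718588, matching desiredMaxFileSize
        }
    }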
2024-12-09T13:29:29,026 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T13:29:29,027 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T13:29:29,029 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T13:29:29,031 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-09T13:29:29,036 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-09T13:29:29,036 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T13:29:29,062 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T13:29:29,070 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T13:29:29,129 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T13:29:29,133 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T13:29:29,136 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T13:29:29,146 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T13:29:29,150 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T13:29:29,154 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T13:29:29,163 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T13:29:29,164 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T13:29:29,171 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T13:29:29,187 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T13:29:29,196 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T13:29:29,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T13:29:29,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T13:29:29,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T13:29:29,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:29,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:29,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:29,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T13:29:29,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:29,208 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c48e4b8eb196,45049,1733750967073, sessionid=0x1000ad205eb0000, setting cluster-up flag (Was=false) 2024-12-09T13:29:29,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:29,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:29,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:29,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
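The cluster-up flag works through the znodes seen in these watcher events: the active master creates /hbase/running under baseZNode=/hbase, and every process that earlier logged "Set watcher on znode that does not yet exist, /hbase/running" receives a NodeCreated event for that path. A minimal sketch of the same exists-with-watch pattern using the plain ZooKeeper client; the connect string and timeout are placeholders, and this is not HBase's ZKWatcher code:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningNodeWatch {
        public static void main(String[] args) throws Exception {
            // Placeholder ensemble; the mini cluster above runs at 127.0.0.1:57683.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });
            Watcher watcher = (WatchedEvent event) ->
                System.out.println("got " + event.getType() + " on " + event.getPath());
            // exists() registers the watch even when the node is absent, so the
            // NodeCreated for /hbase/running is delivered once the master creates it.
            zk.exists("/hbase/running", watcher);
            Thread.sleep(60_000); // keep the session open long enough to observe the event
            zk.close();
        }
    }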
2024-12-09T13:29:29,263 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T13:29:29,265 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c48e4b8eb196,45049,1733750967073 2024-12-09T13:29:29,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:29,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:29,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:29,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:29,313 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T13:29:29,315 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c48e4b8eb196,45049,1733750967073 2024-12-09T13:29:29,321 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T13:29:29,359 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-12-09T13:29:29,365 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T13:29:29,365 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 
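The two "System coprocessor ... loaded" lines above come from the master coprocessor host walking a class list taken from configuration; in stock HBase the master-side list lives under hbase.coprocessor.master.classes (property name assumed from standard HBase configuration, the log only shows the resulting load order and priorities). A sketch of a configuration that would yield exactly these two entries:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MasterCoprocessors {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Comma-separated class names; list order determines the priorities in the log.
            conf.set("hbase.coprocessor.master.classes",
                "org.apache.hadoop.hbase.security.access.AccessController,"
                + "org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver");
            for (String cp : conf.getStrings("hbase.coprocessor.master.classes")) {
                System.out.println("would load master coprocessor: " + cp);
            }
        }
    }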
2024-12-09T13:29:29,380 INFO [RS:1;c48e4b8eb196:36493 {}] regionserver.HRegionServer(746): ClusterId : 93ce3e14-a429-4982-9e09-44291b12e9d6 2024-12-09T13:29:29,381 INFO [RS:0;c48e4b8eb196:44683 {}] regionserver.HRegionServer(746): ClusterId : 93ce3e14-a429-4982-9e09-44291b12e9d6 2024-12-09T13:29:29,381 INFO [RS:2;c48e4b8eb196:39211 {}] regionserver.HRegionServer(746): ClusterId : 93ce3e14-a429-4982-9e09-44291b12e9d6 2024-12-09T13:29:29,383 DEBUG [RS:1;c48e4b8eb196:36493 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T13:29:29,383 DEBUG [RS:0;c48e4b8eb196:44683 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T13:29:29,387 DEBUG [RS:2;c48e4b8eb196:39211 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T13:29:29,417 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T13:29:29,426 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T13:29:29,431 DEBUG [RS:1;c48e4b8eb196:36493 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T13:29:29,431 DEBUG [RS:0;c48e4b8eb196:44683 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T13:29:29,431 DEBUG [RS:2;c48e4b8eb196:39211 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T13:29:29,432 DEBUG [RS:1;c48e4b8eb196:36493 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T13:29:29,432 DEBUG [RS:0;c48e4b8eb196:44683 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T13:29:29,432 DEBUG [RS:2;c48e4b8eb196:39211 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T13:29:29,433 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
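The StochasticLoadBalancer "Loaded config" line above reports its search budget: up to 1000000 steps overall, 800 steps per region, and a 30000 ms cap per balancer run. A hedged sketch of reading those knobs; the property names are assumptions based on the stock StochasticLoadBalancer and are not printed in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerKnobs {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Defaults mirror the values in the "Loaded config" line.
            long maxSteps = conf.getLong("hbase.master.balancer.stochastic.maxSteps", 1_000_000L);
            int stepsPerRegion = conf.getInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
            long maxRunningTime = conf.getLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
            System.out.println(maxSteps + " " + stepsPerRegion + " " + maxRunningTime);
        }
    }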
2024-12-09T13:29:29,438 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c48e4b8eb196,45049,1733750967073 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T13:29:29,447 DEBUG [RS:0;c48e4b8eb196:44683 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T13:29:29,447 DEBUG [RS:1;c48e4b8eb196:36493 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T13:29:29,447 DEBUG [RS:2;c48e4b8eb196:39211 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T13:29:29,448 DEBUG [RS:1;c48e4b8eb196:36493 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bec13f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c48e4b8eb196/172.17.0.2:0 2024-12-09T13:29:29,448 DEBUG [RS:0;c48e4b8eb196:44683 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20dbb09b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c48e4b8eb196/172.17.0.2:0 2024-12-09T13:29:29,448 DEBUG [RS:2;c48e4b8eb196:39211 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36706368, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c48e4b8eb196/172.17.0.2:0 2024-12-09T13:29:29,449 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c48e4b8eb196:0, corePoolSize=5, maxPoolSize=5 2024-12-09T13:29:29,450 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c48e4b8eb196:0, corePoolSize=5, maxPoolSize=5 2024-12-09T13:29:29,450 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c48e4b8eb196:0, corePoolSize=5, maxPoolSize=5 2024-12-09T13:29:29,450 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c48e4b8eb196:0, corePoolSize=5, maxPoolSize=5 2024-12-09T13:29:29,450 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c48e4b8eb196:0, corePoolSize=10, maxPoolSize=10 2024-12-09T13:29:29,450 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,451 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c48e4b8eb196:0, corePoolSize=2, maxPoolSize=2 2024-12-09T13:29:29,451 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting 
executor service name=MASTER_TABLE_OPERATIONS-master/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,464 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733750999464 2024-12-09T13:29:29,466 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T13:29:29,467 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T13:29:29,467 DEBUG [RS:0;c48e4b8eb196:44683 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c48e4b8eb196:44683 2024-12-09T13:29:29,467 DEBUG [RS:2;c48e4b8eb196:39211 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;c48e4b8eb196:39211 2024-12-09T13:29:29,467 DEBUG [RS:1;c48e4b8eb196:36493 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;c48e4b8eb196:36493 2024-12-09T13:29:29,470 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T13:29:29,470 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T13:29:29,471 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T13:29:29,471 INFO [RS:0;c48e4b8eb196:44683 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T13:29:29,471 INFO [RS:0;c48e4b8eb196:44683 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T13:29:29,471 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T13:29:29,471 INFO [RS:1;c48e4b8eb196:36493 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T13:29:29,471 INFO [RS:1;c48e4b8eb196:36493 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T13:29:29,471 DEBUG [RS:0;c48e4b8eb196:44683 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-09T13:29:29,471 DEBUG [RS:1;c48e4b8eb196:36493 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-09T13:29:29,472 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T13:29:29,472 INFO [RS:0;c48e4b8eb196:44683 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T13:29:29,472 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T13:29:29,472 DEBUG [RS:0;c48e4b8eb196:44683 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T13:29:29,472 INFO [RS:2;c48e4b8eb196:39211 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T13:29:29,472 INFO [RS:2;c48e4b8eb196:39211 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T13:29:29,472 DEBUG [RS:2;c48e4b8eb196:39211 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-09T13:29:29,473 INFO [RS:2;c48e4b8eb196:39211 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T13:29:29,473 DEBUG [RS:2;c48e4b8eb196:39211 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T13:29:29,473 INFO [RS:1;c48e4b8eb196:36493 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T13:29:29,473 DEBUG [RS:1;c48e4b8eb196:36493 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T13:29:29,473 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,476 INFO [RS:0;c48e4b8eb196:44683 {}] regionserver.HRegionServer(2659): reportForDuty to master=c48e4b8eb196,45049,1733750967073 with port=44683, startcode=1733750968041 2024-12-09T13:29:29,477 INFO [RS:2;c48e4b8eb196:39211 {}] regionserver.HRegionServer(2659): reportForDuty to master=c48e4b8eb196,45049,1733750967073 with port=39211, startcode=1733750968235 2024-12-09T13:29:29,477 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:29,478 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T13:29:29,480 INFO 
[RS:1;c48e4b8eb196:36493 {}] regionserver.HRegionServer(2659): reportForDuty to master=c48e4b8eb196,45049,1733750967073 with port=36493, startcode=1733750968163 2024-12-09T13:29:29,487 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T13:29:29,489 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T13:29:29,490 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T13:29:29,491 DEBUG [RS:0;c48e4b8eb196:44683 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T13:29:29,491 DEBUG [RS:1;c48e4b8eb196:36493 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T13:29:29,495 DEBUG [RS:2;c48e4b8eb196:39211 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T13:29:29,496 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T13:29:29,497 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T13:29:29,504 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c48e4b8eb196:0:becomeActiveMaster-HFileCleaner.large.0-1733750969499,5,FailOnTimeoutGroup] 2024-12-09T13:29:29,511 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c48e4b8eb196:0:becomeActiveMaster-HFileCleaner.small.0-1733750969505,5,FailOnTimeoutGroup] 2024-12-09T13:29:29,511 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,511 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T13:29:29,513 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,513 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
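All of the chore periods above are in milliseconds, which is easy to misread at a glance: LogsCleaner and HFileCleaner run every 10 minutes (600000 ms), SnapshotCleaner every 30 minutes (1800000 ms), and ReplicationBarrierCleaner every 12 hours (43200000 ms). A one-line conversion check:

    import java.util.concurrent.TimeUnit;

    public class ChorePeriods {
        public static void main(String[] args) {
            System.out.println(TimeUnit.MILLISECONDS.toMinutes(600_000));   // 10 (LogsCleaner, HFileCleaner)
            System.out.println(TimeUnit.MILLISECONDS.toMinutes(1_800_000)); // 30 (SnapshotCleaner)
            System.out.println(TimeUnit.MILLISECONDS.toHours(43_200_000));  // 12 (ReplicationBarrierCleaner)
        }
    }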
2024-12-09T13:29:29,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741831_1007 (size=1321) 2024-12-09T13:29:29,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741831_1007 (size=1321) 2024-12-09T13:29:29,537 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39547, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T13:29:29,537 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53429, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T13:29:29,537 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41975, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T13:29:29,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741831_1007 (size=1321) 2024-12-09T13:29:29,539 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T13:29:29,540 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:29:29,545 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45049 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c48e4b8eb196,36493,1733750968163 2024-12-09T13:29:29,549 INFO 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45049 {}] master.ServerManager(517): Registering regionserver=c48e4b8eb196,36493,1733750968163 2024-12-09T13:29:29,560 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45049 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c48e4b8eb196,44683,1733750968041 2024-12-09T13:29:29,561 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45049 {}] master.ServerManager(517): Registering regionserver=c48e4b8eb196,44683,1733750968041 2024-12-09T13:29:29,566 DEBUG [RS:1;c48e4b8eb196:36493 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:29:29,566 DEBUG [RS:1;c48e4b8eb196:36493 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45819 2024-12-09T13:29:29,566 DEBUG [RS:1;c48e4b8eb196:36493 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T13:29:29,567 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45049 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c48e4b8eb196,39211,1733750968235 2024-12-09T13:29:29,568 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45049 {}] master.ServerManager(517): Registering regionserver=c48e4b8eb196,39211,1733750968235 2024-12-09T13:29:29,568 DEBUG [RS:0;c48e4b8eb196:44683 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:29:29,568 DEBUG [RS:0;c48e4b8eb196:44683 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45819 2024-12-09T13:29:29,568 DEBUG [RS:0;c48e4b8eb196:44683 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T13:29:29,573 DEBUG [RS:2;c48e4b8eb196:39211 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:29:29,573 DEBUG [RS:2;c48e4b8eb196:39211 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45819 2024-12-09T13:29:29,573 DEBUG [RS:2;c48e4b8eb196:39211 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T13:29:29,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741832_1008 (size=32) 2024-12-09T13:29:29,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741832_1008 (size=32) 2024-12-09T13:29:29,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741832_1008 (size=32) 2024-12-09T13:29:29,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T13:29:29,581 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:29:29,588 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T13:29:29,591 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T13:29:29,592 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:29,593 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T13:29:29,593 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T13:29:29,596 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T13:29:29,596 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:29,597 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T13:29:29,597 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T13:29:29,600 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T13:29:29,600 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:29,601 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T13:29:29,601 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T13:29:29,604 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T13:29:29,604 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:29,605 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T13:29:29,605 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T13:29:29,607 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740 2024-12-09T13:29:29,608 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740 2024-12-09T13:29:29,611 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T13:29:29,611 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T13:29:29,612 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T13:29:29,614 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T13:29:29,618 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:29:29,620 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60434125, jitterRate=-0.09946136176586151}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T13:29:29,622 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733750969581Initializing all the Stores at 1733750969584 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733750969584Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733750969587 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733750969587Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733750969587Cleaning up temporary data from old regions at 1733750969611 (+24 ms)Region opened successfully at 1733750969621 (+10 ms) 2024-12-09T13:29:29,622 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T13:29:29,622 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T13:29:29,622 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T13:29:29,622 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T13:29:29,622 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T13:29:29,624 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T13:29:29,624 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733750969622Disabling compacts and flushes for 
region at 1733750969622Disabling writes for close at 1733750969622Writing region close event to WAL at 1733750969623 (+1 ms)Closed at 1733750969624 (+1 ms) 2024-12-09T13:29:29,626 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T13:29:29,626 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T13:29:29,632 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T13:29:29,635 DEBUG [RS:1;c48e4b8eb196:36493 {}] zookeeper.ZKUtil(111): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c48e4b8eb196,36493,1733750968163 2024-12-09T13:29:29,636 WARN [RS:1;c48e4b8eb196:36493 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T13:29:29,636 INFO [RS:1;c48e4b8eb196:36493 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T13:29:29,636 DEBUG [RS:1;c48e4b8eb196:36493 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/WALs/c48e4b8eb196,36493,1733750968163 2024-12-09T13:29:29,637 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c48e4b8eb196,39211,1733750968235] 2024-12-09T13:29:29,637 DEBUG [RS:0;c48e4b8eb196:44683 {}] zookeeper.ZKUtil(111): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c48e4b8eb196,44683,1733750968041 2024-12-09T13:29:29,637 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c48e4b8eb196,44683,1733750968041] 2024-12-09T13:29:29,637 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c48e4b8eb196,36493,1733750968163] 2024-12-09T13:29:29,637 WARN [RS:0;c48e4b8eb196:44683 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T13:29:29,638 INFO [RS:0;c48e4b8eb196:44683 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T13:29:29,638 DEBUG [RS:0;c48e4b8eb196:44683 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/WALs/c48e4b8eb196,44683,1733750968041 2024-12-09T13:29:29,638 DEBUG [RS:2;c48e4b8eb196:39211 {}] zookeeper.ZKUtil(111): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c48e4b8eb196,39211,1733750968235 2024-12-09T13:29:29,638 WARN [RS:2;c48e4b8eb196:39211 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
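The WALFactory lines above show each region server instantiating AsyncFSWALProvider. As a hedged sketch only (the configuration key is assumed here, not read from this log), the WAL provider is normally selected through the hbase.wal.provider setting, with "asyncfs" mapping to the async provider named above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key: "asyncfs" selects AsyncFSWALProvider, the provider logged above.
        conf.set("hbase.wal.provider", "asyncfs");
        System.out.println(conf.get("hbase.wal.provider"));
      }
    }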
2024-12-09T13:29:29,639 INFO [RS:2;c48e4b8eb196:39211 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T13:29:29,639 DEBUG [RS:2;c48e4b8eb196:39211 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/WALs/c48e4b8eb196,39211,1733750968235 2024-12-09T13:29:29,643 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T13:29:29,648 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T13:29:29,669 INFO [RS:0;c48e4b8eb196:44683 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T13:29:29,669 INFO [RS:1;c48e4b8eb196:36493 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T13:29:29,669 INFO [RS:2;c48e4b8eb196:39211 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T13:29:29,687 INFO [RS:2;c48e4b8eb196:39211 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T13:29:29,687 INFO [RS:0;c48e4b8eb196:44683 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T13:29:29,688 INFO [RS:1;c48e4b8eb196:36493 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T13:29:29,694 INFO [RS:2;c48e4b8eb196:39211 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T13:29:29,694 INFO [RS:1;c48e4b8eb196:36493 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T13:29:29,694 INFO [RS:2;c48e4b8eb196:39211 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,694 INFO [RS:1;c48e4b8eb196:36493 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
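The MemStoreFlusher lines above report globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. 95% of the upper mark. As an illustrative sketch under stated assumptions (the heap-fraction keys below are the commonly documented ones, not values taken from this run), those bounds are derived from settings like:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed keys: fraction of the region server heap usable by all memstores (upper mark),
        // and the lower mark as a fraction of that upper mark (0.95 matches 836 M / 880 M above).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        System.out.println(conf.get("hbase.regionserver.global.memstore.size"));
      }
    }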
2024-12-09T13:29:29,695 INFO [RS:0;c48e4b8eb196:44683 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T13:29:29,695 INFO [RS:2;c48e4b8eb196:39211 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T13:29:29,695 INFO [RS:1;c48e4b8eb196:36493 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T13:29:29,695 INFO [RS:0;c48e4b8eb196:44683 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,696 INFO [RS:0;c48e4b8eb196:44683 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T13:29:29,702 INFO [RS:2;c48e4b8eb196:39211 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T13:29:29,702 INFO [RS:1;c48e4b8eb196:36493 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T13:29:29,702 INFO [RS:0;c48e4b8eb196:44683 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T13:29:29,704 INFO [RS:1;c48e4b8eb196:36493 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,704 INFO [RS:2;c48e4b8eb196:39211 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,704 INFO [RS:0;c48e4b8eb196:44683 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-09T13:29:29,704 DEBUG [RS:1;c48e4b8eb196:36493 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,704 DEBUG [RS:0;c48e4b8eb196:44683 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,704 DEBUG [RS:2;c48e4b8eb196:39211 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,704 DEBUG [RS:1;c48e4b8eb196:36493 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,704 DEBUG [RS:0;c48e4b8eb196:44683 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,704 DEBUG [RS:2;c48e4b8eb196:39211 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,704 DEBUG [RS:0;c48e4b8eb196:44683 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,704 DEBUG [RS:1;c48e4b8eb196:36493 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,704 DEBUG [RS:2;c48e4b8eb196:39211 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,704 DEBUG [RS:0;c48e4b8eb196:44683 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,704 DEBUG [RS:1;c48e4b8eb196:36493 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,704 DEBUG [RS:2;c48e4b8eb196:39211 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,705 DEBUG [RS:0;c48e4b8eb196:44683 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,705 DEBUG [RS:1;c48e4b8eb196:36493 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,705 DEBUG [RS:2;c48e4b8eb196:39211 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,705 DEBUG [RS:1;c48e4b8eb196:36493 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c48e4b8eb196:0, corePoolSize=2, maxPoolSize=2 2024-12-09T13:29:29,705 DEBUG [RS:2;c48e4b8eb196:39211 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c48e4b8eb196:0, corePoolSize=2, maxPoolSize=2 2024-12-09T13:29:29,705 DEBUG [RS:0;c48e4b8eb196:44683 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c48e4b8eb196:0, corePoolSize=2, maxPoolSize=2 2024-12-09T13:29:29,705 DEBUG 
[RS:1;c48e4b8eb196:36493 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,705 DEBUG [RS:2;c48e4b8eb196:39211 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,705 DEBUG [RS:0;c48e4b8eb196:44683 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,705 DEBUG [RS:1;c48e4b8eb196:36493 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,705 DEBUG [RS:2;c48e4b8eb196:39211 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,705 DEBUG [RS:0;c48e4b8eb196:44683 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,705 DEBUG [RS:1;c48e4b8eb196:36493 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,705 DEBUG [RS:2;c48e4b8eb196:39211 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,705 DEBUG [RS:0;c48e4b8eb196:44683 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,705 DEBUG [RS:1;c48e4b8eb196:36493 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,705 DEBUG [RS:2;c48e4b8eb196:39211 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,705 DEBUG [RS:0;c48e4b8eb196:44683 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,705 DEBUG [RS:1;c48e4b8eb196:36493 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,705 DEBUG [RS:0;c48e4b8eb196:44683 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,705 DEBUG [RS:2;c48e4b8eb196:39211 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,706 DEBUG [RS:0;c48e4b8eb196:44683 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,706 DEBUG [RS:1;c48e4b8eb196:36493 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,706 DEBUG [RS:2;c48e4b8eb196:39211 {}] executor.ExecutorService(95): Starting 
executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c48e4b8eb196:0, corePoolSize=1, maxPoolSize=1 2024-12-09T13:29:29,706 DEBUG [RS:1;c48e4b8eb196:36493 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0, corePoolSize=3, maxPoolSize=3 2024-12-09T13:29:29,706 DEBUG [RS:0;c48e4b8eb196:44683 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0, corePoolSize=3, maxPoolSize=3 2024-12-09T13:29:29,706 DEBUG [RS:2;c48e4b8eb196:39211 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0, corePoolSize=3, maxPoolSize=3 2024-12-09T13:29:29,706 DEBUG [RS:0;c48e4b8eb196:44683 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c48e4b8eb196:0, corePoolSize=3, maxPoolSize=3 2024-12-09T13:29:29,706 DEBUG [RS:1;c48e4b8eb196:36493 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c48e4b8eb196:0, corePoolSize=3, maxPoolSize=3 2024-12-09T13:29:29,706 DEBUG [RS:2;c48e4b8eb196:39211 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c48e4b8eb196:0, corePoolSize=3, maxPoolSize=3 2024-12-09T13:29:29,712 INFO [RS:1;c48e4b8eb196:36493 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,712 INFO [RS:1;c48e4b8eb196:36493 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,713 INFO [RS:1;c48e4b8eb196:36493 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,713 INFO [RS:1;c48e4b8eb196:36493 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,713 INFO [RS:1;c48e4b8eb196:36493 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,713 INFO [RS:1;c48e4b8eb196:36493 {}] hbase.ChoreService(168): Chore ScheduledChore name=c48e4b8eb196,36493,1733750968163-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T13:29:29,715 INFO [RS:2;c48e4b8eb196:39211 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,715 INFO [RS:2;c48e4b8eb196:39211 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,715 INFO [RS:2;c48e4b8eb196:39211 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,715 INFO [RS:2;c48e4b8eb196:39211 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,715 INFO [RS:2;c48e4b8eb196:39211 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,715 INFO [RS:2;c48e4b8eb196:39211 {}] hbase.ChoreService(168): Chore ScheduledChore name=c48e4b8eb196,39211,1733750968235-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
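Each RS_* executor service started above prints a corePoolSize/maxPoolSize pair; these map directly onto the standard java.util.concurrent thread-pool parameters. A minimal, generic sketch of a single-thread pool like RS_OPEN_REGION (plain JDK code, names illustrative, not HBase internals):

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class SingleThreadExecutorSketch {
      public static void main(String[] args) {
        // corePoolSize=1, maxPoolSize=1, matching the RS_OPEN_REGION line above.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        pool.submit(() -> System.out.println("open-region task"));
        pool.shutdown();
      }
    }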
2024-12-09T13:29:29,719 INFO [RS:0;c48e4b8eb196:44683 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,719 INFO [RS:0;c48e4b8eb196:44683 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,719 INFO [RS:0;c48e4b8eb196:44683 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,719 INFO [RS:0;c48e4b8eb196:44683 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,720 INFO [RS:0;c48e4b8eb196:44683 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,720 INFO [RS:0;c48e4b8eb196:44683 {}] hbase.ChoreService(168): Chore ScheduledChore name=c48e4b8eb196,44683,1733750968041-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T13:29:29,739 INFO [RS:1;c48e4b8eb196:36493 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T13:29:29,739 INFO [RS:2;c48e4b8eb196:39211 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T13:29:29,742 INFO [RS:1;c48e4b8eb196:36493 {}] hbase.ChoreService(168): Chore ScheduledChore name=c48e4b8eb196,36493,1733750968163-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,742 INFO [RS:2;c48e4b8eb196:39211 {}] hbase.ChoreService(168): Chore ScheduledChore name=c48e4b8eb196,39211,1733750968235-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,742 INFO [RS:2;c48e4b8eb196:39211 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,742 INFO [RS:1;c48e4b8eb196:36493 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,742 INFO [RS:2;c48e4b8eb196:39211 {}] regionserver.Replication(171): c48e4b8eb196,39211,1733750968235 started 2024-12-09T13:29:29,742 INFO [RS:1;c48e4b8eb196:36493 {}] regionserver.Replication(171): c48e4b8eb196,36493,1733750968163 started 2024-12-09T13:29:29,742 INFO [RS:0;c48e4b8eb196:44683 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T13:29:29,742 INFO [RS:0;c48e4b8eb196:44683 {}] hbase.ChoreService(168): Chore ScheduledChore name=c48e4b8eb196,44683,1733750968041-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,743 INFO [RS:0;c48e4b8eb196:44683 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,743 INFO [RS:0;c48e4b8eb196:44683 {}] regionserver.Replication(171): c48e4b8eb196,44683,1733750968041 started 2024-12-09T13:29:29,767 INFO [RS:2;c48e4b8eb196:39211 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:29,767 INFO [RS:0;c48e4b8eb196:44683 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T13:29:29,767 INFO [RS:2;c48e4b8eb196:39211 {}] regionserver.HRegionServer(1482): Serving as c48e4b8eb196,39211,1733750968235, RpcServer on c48e4b8eb196/172.17.0.2:39211, sessionid=0x1000ad205eb0003 2024-12-09T13:29:29,767 INFO [RS:0;c48e4b8eb196:44683 {}] regionserver.HRegionServer(1482): Serving as c48e4b8eb196,44683,1733750968041, RpcServer on c48e4b8eb196/172.17.0.2:44683, sessionid=0x1000ad205eb0001 2024-12-09T13:29:29,768 DEBUG [RS:2;c48e4b8eb196:39211 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T13:29:29,768 DEBUG [RS:0;c48e4b8eb196:44683 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T13:29:29,768 DEBUG [RS:0;c48e4b8eb196:44683 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c48e4b8eb196,44683,1733750968041 2024-12-09T13:29:29,768 DEBUG [RS:2;c48e4b8eb196:39211 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c48e4b8eb196,39211,1733750968235 2024-12-09T13:29:29,769 DEBUG [RS:2;c48e4b8eb196:39211 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c48e4b8eb196,39211,1733750968235' 2024-12-09T13:29:29,769 DEBUG [RS:0;c48e4b8eb196:44683 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c48e4b8eb196,44683,1733750968041' 2024-12-09T13:29:29,769 DEBUG [RS:0;c48e4b8eb196:44683 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T13:29:29,769 DEBUG [RS:2;c48e4b8eb196:39211 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T13:29:29,770 INFO [RS:1;c48e4b8eb196:36493 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T13:29:29,770 DEBUG [RS:2;c48e4b8eb196:39211 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T13:29:29,770 DEBUG [RS:0;c48e4b8eb196:44683 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T13:29:29,770 INFO [RS:1;c48e4b8eb196:36493 {}] regionserver.HRegionServer(1482): Serving as c48e4b8eb196,36493,1733750968163, RpcServer on c48e4b8eb196/172.17.0.2:36493, sessionid=0x1000ad205eb0002 2024-12-09T13:29:29,770 DEBUG [RS:1;c48e4b8eb196:36493 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T13:29:29,771 DEBUG [RS:1;c48e4b8eb196:36493 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c48e4b8eb196,36493,1733750968163 2024-12-09T13:29:29,771 DEBUG [RS:2;c48e4b8eb196:39211 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T13:29:29,771 DEBUG [RS:1;c48e4b8eb196:36493 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c48e4b8eb196,36493,1733750968163' 2024-12-09T13:29:29,771 DEBUG [RS:2;c48e4b8eb196:39211 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T13:29:29,771 DEBUG [RS:1;c48e4b8eb196:36493 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T13:29:29,771 DEBUG [RS:0;c48e4b8eb196:44683 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T13:29:29,771 DEBUG [RS:2;c48e4b8eb196:39211 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c48e4b8eb196,39211,1733750968235 2024-12-09T13:29:29,771 DEBUG [RS:0;c48e4b8eb196:44683 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T13:29:29,771 DEBUG [RS:2;c48e4b8eb196:39211 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c48e4b8eb196,39211,1733750968235' 2024-12-09T13:29:29,771 DEBUG [RS:0;c48e4b8eb196:44683 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c48e4b8eb196,44683,1733750968041 2024-12-09T13:29:29,771 DEBUG [RS:2;c48e4b8eb196:39211 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T13:29:29,771 DEBUG [RS:0;c48e4b8eb196:44683 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c48e4b8eb196,44683,1733750968041' 2024-12-09T13:29:29,771 DEBUG [RS:0;c48e4b8eb196:44683 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T13:29:29,772 DEBUG [RS:1;c48e4b8eb196:36493 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T13:29:29,772 DEBUG [RS:2;c48e4b8eb196:39211 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T13:29:29,772 DEBUG [RS:1;c48e4b8eb196:36493 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T13:29:29,772 DEBUG [RS:0;c48e4b8eb196:44683 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T13:29:29,772 DEBUG [RS:1;c48e4b8eb196:36493 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 
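The flush-table-proc member registration above (watching /hbase/flush-table-proc/abort and /hbase/flush-table-proc/acquired) is the region-server side of the distributed flush procedure. For orientation only, such a procedure is typically kicked off from a client via Admin.flush; the table name below is illustrative and not part of this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Triggers the flush-table procedure whose ZK member registration is logged above.
          admin.flush(TableName.valueOf("example_table")); // illustrative table name
        }
      }
    }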
2024-12-09T13:29:29,772 DEBUG [RS:1;c48e4b8eb196:36493 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c48e4b8eb196,36493,1733750968163 2024-12-09T13:29:29,772 DEBUG [RS:1;c48e4b8eb196:36493 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c48e4b8eb196,36493,1733750968163' 2024-12-09T13:29:29,773 DEBUG [RS:1;c48e4b8eb196:36493 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T13:29:29,773 DEBUG [RS:0;c48e4b8eb196:44683 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T13:29:29,773 DEBUG [RS:1;c48e4b8eb196:36493 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T13:29:29,773 INFO [RS:0;c48e4b8eb196:44683 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T13:29:29,773 INFO [RS:0;c48e4b8eb196:44683 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T13:29:29,774 DEBUG [RS:1;c48e4b8eb196:36493 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T13:29:29,774 INFO [RS:1;c48e4b8eb196:36493 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T13:29:29,774 INFO [RS:1;c48e4b8eb196:36493 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T13:29:29,775 DEBUG [RS:2;c48e4b8eb196:39211 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T13:29:29,775 INFO [RS:2;c48e4b8eb196:39211 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T13:29:29,775 INFO [RS:2;c48e4b8eb196:39211 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T13:29:29,799 WARN [c48e4b8eb196:45049 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
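Both quota managers report "Quota support disabled" above. As an assumption-labelled sketch (the key name is not taken from this log), enabling quota support is normally a single boolean switch in the configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key: the switch consulted by RegionServerRpcQuotaManager and
        // RegionServerSpaceQuotaManager, which both report quotas disabled above.
        conf.setBoolean("hbase.quota.enabled", true);
        System.out.println(conf.getBoolean("hbase.quota.enabled", false));
      }
    }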
2024-12-09T13:29:29,879 INFO [RS:1;c48e4b8eb196:36493 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T13:29:29,879 INFO [RS:0;c48e4b8eb196:44683 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T13:29:29,880 INFO [RS:2;c48e4b8eb196:39211 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T13:29:29,884 INFO [RS:2;c48e4b8eb196:39211 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c48e4b8eb196%2C39211%2C1733750968235, suffix=, logDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/WALs/c48e4b8eb196,39211,1733750968235, archiveDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/oldWALs, maxLogs=32 2024-12-09T13:29:29,887 INFO [RS:0;c48e4b8eb196:44683 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c48e4b8eb196%2C44683%2C1733750968041, suffix=, logDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/WALs/c48e4b8eb196,44683,1733750968041, archiveDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/oldWALs, maxLogs=32 2024-12-09T13:29:29,888 INFO [RS:1;c48e4b8eb196:36493 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c48e4b8eb196%2C36493%2C1733750968163, suffix=, logDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/WALs/c48e4b8eb196,36493,1733750968163, archiveDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/oldWALs, maxLogs=32 2024-12-09T13:29:29,905 DEBUG [RS:2;c48e4b8eb196:39211 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/WALs/c48e4b8eb196,39211,1733750968235/c48e4b8eb196%2C39211%2C1733750968235.1733750969886, exclude list is [], retry=0 2024-12-09T13:29:29,910 DEBUG [RS:1;c48e4b8eb196:36493 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/WALs/c48e4b8eb196,36493,1733750968163/c48e4b8eb196%2C36493%2C1733750968163.1733750969890, exclude list is [], retry=0 2024-12-09T13:29:29,914 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39425,DS-f429e652-0d3e-448e-8297-4199c22c018a,DISK] 2024-12-09T13:29:29,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33239,DS-1c81e62a-c5b9-4b3f-97d4-2b43babdecac,DISK] 2024-12-09T13:29:29,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42277,DS-02dae94e-eebd-48dc-9af8-3635b1fd49f9,DISK] 2024-12-09T13:29:29,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:33239,DS-1c81e62a-c5b9-4b3f-97d4-2b43babdecac,DISK] 2024-12-09T13:29:29,917 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42277,DS-02dae94e-eebd-48dc-9af8-3635b1fd49f9,DISK] 2024-12-09T13:29:29,917 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39425,DS-f429e652-0d3e-448e-8297-4199c22c018a,DISK] 2024-12-09T13:29:29,919 DEBUG [RS:0;c48e4b8eb196:44683 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/WALs/c48e4b8eb196,44683,1733750968041/c48e4b8eb196%2C44683%2C1733750968041.1733750969889, exclude list is [], retry=0 2024-12-09T13:29:29,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39425,DS-f429e652-0d3e-448e-8297-4199c22c018a,DISK] 2024-12-09T13:29:29,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42277,DS-02dae94e-eebd-48dc-9af8-3635b1fd49f9,DISK] 2024-12-09T13:29:29,952 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33239,DS-1c81e62a-c5b9-4b3f-97d4-2b43babdecac,DISK] 2024-12-09T13:29:29,975 INFO [RS:2;c48e4b8eb196:39211 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/WALs/c48e4b8eb196,39211,1733750968235/c48e4b8eb196%2C39211%2C1733750968235.1733750969886 2024-12-09T13:29:29,988 DEBUG [RS:2;c48e4b8eb196:39211 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42881:42881),(127.0.0.1/127.0.0.1:34425:34425),(127.0.0.1/127.0.0.1:42461:42461)] 2024-12-09T13:29:29,988 INFO [RS:1;c48e4b8eb196:36493 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/WALs/c48e4b8eb196,36493,1733750968163/c48e4b8eb196%2C36493%2C1733750968163.1733750969890 2024-12-09T13:29:29,998 DEBUG [RS:1;c48e4b8eb196:36493 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42461:42461),(127.0.0.1/127.0.0.1:42881:42881),(127.0.0.1/127.0.0.1:34425:34425)] 2024-12-09T13:29:30,001 INFO [RS:0;c48e4b8eb196:44683 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/WALs/c48e4b8eb196,44683,1733750968041/c48e4b8eb196%2C44683%2C1733750968041.1733750969889 2024-12-09T13:29:30,007 DEBUG [RS:0;c48e4b8eb196:44683 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42881:42881),(127.0.0.1/127.0.0.1:34425:34425),(127.0.0.1/127.0.0.1:42461:42461)] 2024-12-09T13:29:30,053 DEBUG [c48e4b8eb196:45049 {}] assignment.AssignmentManager(2472): Processing 
assignQueue; systemServersCount=3, allServersCount=3 2024-12-09T13:29:30,066 DEBUG [c48e4b8eb196:45049 {}] balancer.BalancerClusterState(204): Hosts are {c48e4b8eb196=0} racks are {/default-rack=0} 2024-12-09T13:29:30,077 DEBUG [c48e4b8eb196:45049 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T13:29:30,077 DEBUG [c48e4b8eb196:45049 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T13:29:30,077 DEBUG [c48e4b8eb196:45049 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T13:29:30,077 DEBUG [c48e4b8eb196:45049 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T13:29:30,077 DEBUG [c48e4b8eb196:45049 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T13:29:30,077 DEBUG [c48e4b8eb196:45049 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T13:29:30,077 INFO [c48e4b8eb196:45049 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T13:29:30,077 INFO [c48e4b8eb196:45049 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T13:29:30,077 INFO [c48e4b8eb196:45049 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T13:29:30,078 DEBUG [c48e4b8eb196:45049 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T13:29:30,089 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:29:30,097 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c48e4b8eb196,39211,1733750968235, state=OPENING 2024-12-09T13:29:30,127 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T13:29:30,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:30,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:30,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:30,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:30,139 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T13:29:30,141 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T13:29:30,141 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T13:29:30,141 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T13:29:30,142 DEBUG 
[PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T13:29:30,146 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:29:30,354 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T13:29:30,357 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45583, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T13:29:30,393 INFO [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T13:29:30,393 INFO [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T13:29:30,394 INFO [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-09T13:29:30,410 INFO [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c48e4b8eb196%2C39211%2C1733750968235.meta, suffix=.meta, logDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/WALs/c48e4b8eb196,39211,1733750968235, archiveDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/oldWALs, maxLogs=32 2024-12-09T13:29:30,463 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/WALs/c48e4b8eb196,39211,1733750968235/c48e4b8eb196%2C39211%2C1733750968235.meta.1733750970412.meta, exclude list is [], retry=0 2024-12-09T13:29:30,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39425,DS-f429e652-0d3e-448e-8297-4199c22c018a,DISK] 2024-12-09T13:29:30,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42277,DS-02dae94e-eebd-48dc-9af8-3635b1fd49f9,DISK] 2024-12-09T13:29:30,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33239,DS-1c81e62a-c5b9-4b3f-97d4-2b43babdecac,DISK] 2024-12-09T13:29:30,508 INFO [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/WALs/c48e4b8eb196,39211,1733750968235/c48e4b8eb196%2C39211%2C1733750968235.meta.1733750970412.meta 2024-12-09T13:29:30,522 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42881:42881),(127.0.0.1/127.0.0.1:34425:34425),(127.0.0.1/127.0.0.1:42461:42461)] 2024-12-09T13:29:30,522 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T13:29:30,524 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-09T13:29:30,526 INFO [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T13:29:30,527 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T13:29:30,543 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T13:29:30,546 INFO [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
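The WAL lines above show an AsyncFSWALProvider writer being created with blocksize=256 MB, rollsize=128 MB and maxLogs=32. A minimal sketch of the configuration keys that normally control those values follows; the key names (hbase.wal.provider, hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier, hbase.regionserver.maxlogs) are the standard hbase-site.xml settings and are assumed here rather than taken from this test's source.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalSettingsSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // "asyncfs" selects the AsyncFSWALProvider seen in
            // "Instantiating WALProvider of type ... AsyncFSWALProvider".
            conf.set("hbase.wal.provider", "asyncfs");
            // blocksize=256 MB; the roll size is derived as blocksize * multiplier,
            // so 256 MB * 0.5 gives the rollsize=128 MB reported above.
            conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
            conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
            // maxLogs=32 above.
            conf.setInt("hbase.regionserver.maxlogs", 32);
            System.out.println("WAL provider: " + conf.get("hbase.wal.provider"));
        }
    }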
2024-12-09T13:29:30,576 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T13:29:30,587 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:29:30,588 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T13:29:30,588 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T13:29:30,614 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T13:29:30,619 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T13:29:30,619 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:30,624 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T13:29:30,625 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T13:29:30,628 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T13:29:30,628 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:30,632 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T13:29:30,633 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T13:29:30,636 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T13:29:30,636 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:30,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T13:29:30,642 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T13:29:30,653 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T13:29:30,653 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:30,654 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
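The CompactionConfiguration lines above repeat the same store-level defaults for each column family of hbase:meta (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms, jitter 0.5). A small sketch, assuming the standard configuration keys behind those numbers:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionDefaultsSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Each value below maps to a field of the CompactionConfiguration line above.
            System.out.println(conf.getInt("hbase.hstore.compaction.min", 3));            // minFilesToCompact
            System.out.println(conf.getInt("hbase.hstore.compaction.max", 10));           // maxFilesToCompact
            System.out.println(conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));     // ratio
            System.out.println(conf.getFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f)); // off-peak ratio
            System.out.println(conf.getLong("hbase.hregion.majorcompaction", 604800000L));    // major period (7 days)
            System.out.println(conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.5f));  // major jitter
        }
    }

The throttle point of 2684354560 bytes (2.5 GB) is the size above which a compaction is routed to the large-compaction thread pool rather than the small one.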
2024-12-09T13:29:30,655 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T13:29:30,661 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740 2024-12-09T13:29:30,670 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740 2024-12-09T13:29:30,674 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T13:29:30,674 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T13:29:30,676 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T13:29:30,684 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T13:29:30,686 INFO [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69371668, jitterRate=0.03371840715408325}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T13:29:30,687 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T13:29:30,691 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733750970599Writing region info on filesystem at 1733750970599Initializing all the Stores at 1733750970602 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733750970602Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733750970611 (+9 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733750970611Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733750970611Cleaning up temporary data from old regions at 1733750970674 (+63 ms)Running coprocessor post-open hooks at 1733750970687 (+13 ms)Region opened successfully at 1733750970691 (+4 ms) 2024-12-09T13:29:30,706 INFO [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733750970340 2024-12-09T13:29:30,721 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T13:29:30,722 INFO [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T13:29:30,725 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:29:30,727 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c48e4b8eb196,39211,1733750968235, state=OPEN 2024-12-09T13:29:30,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T13:29:30,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T13:29:30,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T13:29:30,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T13:29:30,763 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T13:29:30,763 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T13:29:30,763 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T13:29:30,763 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T13:29:30,763 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=c48e4b8eb196,39211,1733750968235 2024-12-09T13:29:30,770 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T13:29:30,770 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c48e4b8eb196,39211,1733750968235 in 620 msec 2024-12-09T13:29:30,779 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T13:29:30,779 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.1400 sec 2024-12-09T13:29:30,781 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T13:29:30,781 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T13:29:30,800 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:29:30,801 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:29:30,823 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:29:30,826 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45971, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:29:30,848 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.4750 sec 2024-12-09T13:29:30,848 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733750970848, completionTime=-1 2024-12-09T13:29:30,852 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-09T13:29:30,852 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
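At this point hbase:meta is open on c48e4b8eb196,39211, its location has been published under /hbase/meta-region-server, and InitMetaProcedure has created the 'default' and 'hbase' namespaces. A client-side sketch that reads back the same information once the cluster is up, using only the standard client API (nothing specific to this test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaAndNamespacesSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin();
                 RegionLocator meta = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
                // Where hbase:meta is currently served, i.e. the location the master
                // just wrote to ZooKeeper above.
                HRegionLocation loc = meta.getRegionLocation(Bytes.toBytes(""));
                System.out.println("meta is on " + loc.getServerName());
                // InitMetaProcedure created the 'default' and 'hbase' namespaces.
                for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
                    System.out.println("namespace: " + ns.getName());
                }
            }
        }
    }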
2024-12-09T13:29:30,931 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-09T13:29:30,931 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733751030931 2024-12-09T13:29:30,931 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733751090931 2024-12-09T13:29:30,931 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 79 msec 2024-12-09T13:29:30,934 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-09T13:29:30,946 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c48e4b8eb196,45049,1733750967073-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:30,946 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c48e4b8eb196,45049,1733750967073-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:30,946 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c48e4b8eb196,45049,1733750967073-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:30,948 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c48e4b8eb196:45049, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:30,948 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:30,951 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T13:29:30,967 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T13:29:30,993 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.603sec 2024-12-09T13:29:30,994 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T13:29:30,995 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T13:29:30,996 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T13:29:30,996 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
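The chore lines above are the master's periodic background tasks; a couple of the reported periods and the "Quota support disabled" message map to well-known configuration keys. A minimal sketch, assuming the usual key names (hbase.balancer.period, hbase.normalizer.period, hbase.quota.enabled):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MasterChoreSettingsSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // BalancerChore period=300000 above.
            System.out.println(conf.getInt("hbase.balancer.period", 300000));
            // RegionNormalizerChore period=300000 above (key name assumed).
            System.out.println(conf.getInt("hbase.normalizer.period", 300000));
            // "Quota support disabled" reflects the default of this flag.
            System.out.println(conf.getBoolean("hbase.quota.enabled", false));
        }
    }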
2024-12-09T13:29:30,997 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T13:29:30,997 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c48e4b8eb196,45049,1733750967073-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T13:29:30,998 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c48e4b8eb196,45049,1733750967073-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T13:29:31,010 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62913773, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:29:31,014 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-09T13:29:31,014 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-09T13:29:31,018 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:29:31,020 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:29:31,026 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T13:29:31,026 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is c48e4b8eb196,45049,1733750967073 2024-12-09T13:29:31,029 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5898bccd 2024-12-09T13:29:31,030 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:29:31,031 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T13:29:31,033 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50543, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T13:29:31,034 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:29:31,035 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:29:31,035 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ccec902, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:29:31,035 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:29:31,038 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, 
sasl=false 2024-12-09T13:29:31,040 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45049 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T13:29:31,041 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:29:31,045 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35774, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:29:31,048 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9d1e90a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:29:31,048 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:29:31,050 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-12-09T13:29:31,054 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T13:29:31,055 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:31,057 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45049 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-12-09T13:29:31,057 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:29:31,058 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:29:31,059 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T13:29:31,060 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41176, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:29:31,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c48e4b8eb196,45049,1733750967073 2024-12-09T13:29:31,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 
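The cluster bring-up in this run is driven by HBaseTestingUtil ("Minicluster is up", then "Starting mini mapreduce cluster..."). A rough sketch of that flow, assuming the HBaseTestingUtil API mirrors the older HBaseTestingUtility method names; the MapReduce counterpart is assumed to be startMiniMapReduceCluster and is not confirmed by this log:

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtil util = new HBaseTestingUtil();
            // Starts in-process ZooKeeper, HDFS and HBase; this run used 3 region servers.
            util.startMiniCluster(3);
            try {
                // Assumed method name: brings up the mini MapReduce/YARN cluster,
                // which is what produces the Jetty/YARN lines later in this log.
                util.startMiniMapReduceCluster();
                // ... run test code against util.getConnection() ...
            } finally {
                util.shutdownMiniCluster();
            }
        }
    }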
2024-12-09T13:29:31,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/test.cache.data in system properties and HBase conf 2024-12-09T13:29:31,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T13:29:31,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop.log.dir in system properties and HBase conf 2024-12-09T13:29:31,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T13:29:31,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T13:29:31,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T13:29:31,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T13:29:31,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T13:29:31,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T13:29:31,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T13:29:31,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T13:29:31,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T13:29:31,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T13:29:31,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T13:29:31,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T13:29:31,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/nfs.dump.dir in system properties and HBase conf 2024-12-09T13:29:31,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/java.io.tmpdir in system properties and HBase conf 2024-12-09T13:29:31,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T13:29:31,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T13:29:31,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T13:29:31,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T13:29:31,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741837_1013 (size=349) 2024-12-09T13:29:31,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741837_1013 (size=349) 2024-12-09T13:29:31,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741837_1013 (size=349) 2024-12-09T13:29:31,092 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] 
regionserver.HRegion(7572): creating {ENCODED => 96d9654456b57311f15f8834c0cf2325, NAME => 'hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:29:31,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741838_1014 (size=36) 2024-12-09T13:29:31,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741838_1014 (size=36) 2024-12-09T13:29:31,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741838_1014 (size=36) 2024-12-09T13:29:31,113 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:29:31,113 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing 96d9654456b57311f15f8834c0cf2325, disabling compactions & flushes 2024-12-09T13:29:31,113 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. 2024-12-09T13:29:31,113 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. 2024-12-09T13:29:31,113 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. after waiting 0 ms 2024-12-09T13:29:31,113 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. 2024-12-09T13:29:31,113 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. 
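The descriptor printed above for hbase:acl (a single column family 'l' with VERSIONS=1, IN_MEMORY=true, 8 KB blocks, no bloom filter, no block encoding) can be expressed with the client-side builder API. A sketch that builds an equivalent descriptor for a placeholder table named "example_table" (the table name is illustrative, not from the log):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
        public static void main(String[] args) throws Exception {
            // Attributes mirror the 'l' family of hbase:acl as printed above.
            ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("l"))
                .setMaxVersions(1)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .setBloomFilterType(BloomType.NONE)
                .setDataBlockEncoding(DataBlockEncoding.NONE)
                .build();
            TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
                .setColumnFamily(cf)
                .build();
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                admin.createTable(td);
            }
        }
    }

Submitting such a descriptor through Admin.createTable drives the same CreateTableProcedure state machine seen in this log (PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS, ...).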
2024-12-09T13:29:31,114 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 96d9654456b57311f15f8834c0cf2325: Waiting for close lock at 1733750971113Disabling compacts and flushes for region at 1733750971113Disabling writes for close at 1733750971113Writing region close event to WAL at 1733750971113Closed at 1733750971113 2024-12-09T13:29:31,116 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T13:29:31,123 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1733750971117"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733750971117"}]},"ts":"1733750971117"} 2024-12-09T13:29:31,128 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-09T13:29:31,130 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T13:29:31,132 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733750971130"}]},"ts":"1733750971130"} 2024-12-09T13:29:31,137 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-12-09T13:29:31,137 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {c48e4b8eb196=0} racks are {/default-rack=0} 2024-12-09T13:29:31,139 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T13:29:31,139 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T13:29:31,139 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T13:29:31,139 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T13:29:31,140 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T13:29:31,141 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T13:29:31,141 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T13:29:31,141 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T13:29:31,141 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T13:29:31,141 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T13:29:31,144 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=96d9654456b57311f15f8834c0cf2325, ASSIGN}] 2024-12-09T13:29:31,146 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=96d9654456b57311f15f8834c0cf2325, ASSIGN 2024-12-09T13:29:31,148 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=96d9654456b57311f15f8834c0cf2325, ASSIGN; state=OFFLINE, location=c48e4b8eb196,39211,1733750968235; forceNewPlan=false, retain=false 2024-12-09T13:29:31,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741839_1015 (size=592039) 2024-12-09T13:29:31,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741839_1015 (size=592039) 2024-12-09T13:29:31,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741839_1015 (size=592039) 2024-12-09T13:29:31,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T13:29:31,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741840_1016 (size=1663647) 2024-12-09T13:29:31,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741840_1016 (size=1663647) 2024-12-09T13:29:31,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741840_1016 (size=1663647) 2024-12-09T13:29:31,301 INFO [c48e4b8eb196:45049 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-09T13:29:31,302 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=96d9654456b57311f15f8834c0cf2325, regionState=OPENING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:29:31,309 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=96d9654456b57311f15f8834c0cf2325, ASSIGN because future has completed 2024-12-09T13:29:31,310 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 96d9654456b57311f15f8834c0cf2325, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:29:31,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T13:29:31,499 INFO [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. 2024-12-09T13:29:31,499 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 96d9654456b57311f15f8834c0cf2325, NAME => 'hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325.', STARTKEY => '', ENDKEY => ''} 2024-12-09T13:29:31,500 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. 
service=AccessControlService 2024-12-09T13:29:31,500 INFO [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T13:29:31,501 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 96d9654456b57311f15f8834c0cf2325 2024-12-09T13:29:31,501 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:29:31,501 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 96d9654456b57311f15f8834c0cf2325 2024-12-09T13:29:31,501 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 96d9654456b57311f15f8834c0cf2325 2024-12-09T13:29:31,506 INFO [StoreOpener-96d9654456b57311f15f8834c0cf2325-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 96d9654456b57311f15f8834c0cf2325 2024-12-09T13:29:31,510 INFO [StoreOpener-96d9654456b57311f15f8834c0cf2325-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 96d9654456b57311f15f8834c0cf2325 columnFamilyName l 2024-12-09T13:29:31,510 DEBUG [StoreOpener-96d9654456b57311f15f8834c0cf2325-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:31,512 INFO [StoreOpener-96d9654456b57311f15f8834c0cf2325-1 {}] regionserver.HStore(327): Store=96d9654456b57311f15f8834c0cf2325/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:29:31,512 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 96d9654456b57311f15f8834c0cf2325 2024-12-09T13:29:31,515 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/acl/96d9654456b57311f15f8834c0cf2325 2024-12-09T13:29:31,516 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/acl/96d9654456b57311f15f8834c0cf2325 2024-12-09T13:29:31,517 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 96d9654456b57311f15f8834c0cf2325 2024-12-09T13:29:31,517 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 96d9654456b57311f15f8834c0cf2325 2024-12-09T13:29:31,528 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 96d9654456b57311f15f8834c0cf2325 2024-12-09T13:29:31,538 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/acl/96d9654456b57311f15f8834c0cf2325/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:29:31,539 INFO [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened 96d9654456b57311f15f8834c0cf2325; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66237243, jitterRate=-0.012988165020942688}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:29:31,540 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 96d9654456b57311f15f8834c0cf2325 2024-12-09T13:29:31,542 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 96d9654456b57311f15f8834c0cf2325: Running coprocessor pre-open hook at 1733750971502Writing region info on filesystem at 1733750971502Initializing all the Stores at 1733750971504 (+2 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733750971504Cleaning up temporary data from old regions at 1733750971517 (+13 ms)Running coprocessor post-open hooks at 1733750971540 (+23 ms)Region opened successfully at 1733750971542 (+2 ms) 2024-12-09T13:29:31,548 INFO [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., pid=6, masterSystemTime=1733750971478 2024-12-09T13:29:31,554 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. 2024-12-09T13:29:31,554 INFO [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. 2024-12-09T13:29:31,555 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=96d9654456b57311f15f8834c0cf2325, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:29:31,561 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 96d9654456b57311f15f8834c0cf2325, server=c48e4b8eb196,39211,1733750968235 because future has completed 2024-12-09T13:29:31,586 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T13:29:31,588 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 96d9654456b57311f15f8834c0cf2325, server=c48e4b8eb196,39211,1733750968235 in 271 msec 2024-12-09T13:29:31,591 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T13:29:31,591 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=96d9654456b57311f15f8834c0cf2325, ASSIGN in 443 msec 2024-12-09T13:29:31,595 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T13:29:31,595 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733750971595"}]},"ts":"1733750971595"} 2024-12-09T13:29:31,600 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-12-09T13:29:31,604 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T13:29:31,611 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 560 msec 2024-12-09T13:29:31,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T13:29:31,705 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: hbase:acl completed 2024-12-09T13:29:31,723 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T13:29:31,725 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T13:29:31,725 INFO [master/c48e4b8eb196:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c48e4b8eb196,45049,1733750967073-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
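The repeated "Checking to see if procedure is done pid=4" lines are the client polling the master until CreateTableProcedure finishes, and the RawAsyncHBaseAdmin line marks its completion. A sketch of the corresponding asynchronous client call, again with a placeholder table name ("example_async" is illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncAdmin;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class AsyncCreateTableSketch {
        public static void main(String[] args) throws Exception {
            TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example_async"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f"))
                .build();
            try (AsyncConnection conn =
                     ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
                AsyncAdmin admin = conn.getAdmin();
                // The returned future completes only once the master-side CreateTableProcedure
                // has finished, i.e. when a line like "Operation: CREATE ... completed" is logged.
                admin.createTable(td).get();
            }
        }
    }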
2024-12-09T13:29:32,934 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T13:29:33,106 WARN [Thread-383 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T13:29:33,386 INFO [Thread-383 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T13:29:33,394 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-09T13:29:33,396 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T13:29:33,419 INFO [Thread-383 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T13:29:33,419 INFO [Thread-383 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T13:29:33,419 INFO [Thread-383 {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T13:29:33,428 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ce493c7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop.log.dir/,AVAILABLE} 2024-12-09T13:29:33,428 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@34002cd5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-09T13:29:33,432 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T13:29:33,432 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T13:29:33,432 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T13:29:33,437 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T13:29:33,452 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@544691f2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop.log.dir/,AVAILABLE} 2024-12-09T13:29:33,453 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@96821f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-09T13:29:33,593 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-12-09T13:29:33,594 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-12-09T13:29:33,594 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-09T13:29:33,597 INFO [Thread-383 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-09T13:29:33,680 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-09T13:29:34,121 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-09T13:29:34,472 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-09T13:29:34,498 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5b668d97{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/java.io.tmpdir/jetty-localhost-33963-hadoop-yarn-common-3_4_1_jar-_-any-13739956536358029662/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-09T13:29:34,498 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7f35e2e0{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/java.io.tmpdir/jetty-localhost-32931-hadoop-yarn-common-3_4_1_jar-_-any-4661479330903538696/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-09T13:29:34,499 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@61b551a1{HTTP/1.1, (http/1.1)}{localhost:32931} 2024-12-09T13:29:34,499 INFO [Time-limited test {}] server.Server(415): Started @15670ms 2024-12-09T13:29:34,501 INFO [Thread-383 {}] 
server.AbstractConnector(333): Started ServerConnector@1772f5a9{HTTP/1.1, (http/1.1)}{localhost:33963} 2024-12-09T13:29:34,502 INFO [Thread-383 {}] server.Server(415): Started @15672ms 2024-12-09T13:29:34,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741841_1017 (size=5) 2024-12-09T13:29:34,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741841_1017 (size=5) 2024-12-09T13:29:34,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741841_1017 (size=5) 2024-12-09T13:29:35,552 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-09T13:29:35,560 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T13:29:35,649 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-09T13:29:35,650 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T13:29:35,657 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T13:29:35,657 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T13:29:35,657 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T13:29:35,660 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T13:29:35,671 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5011963e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop.log.dir/,AVAILABLE} 2024-12-09T13:29:35,672 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18e8f4ed{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-09T13:29:35,796 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-09T13:29:35,796 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-09T13:29:35,796 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-09T13:29:35,796 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-09T13:29:35,822 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-09T13:29:35,854 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-09T13:29:35,988 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-09T13:29:36,004 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5e958680{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/java.io.tmpdir/jetty-localhost-37101-hadoop-yarn-common-3_4_1_jar-_-any-4303841021445736022/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-09T13:29:36,005 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@26728a0b{HTTP/1.1, (http/1.1)}{localhost:37101} 2024-12-09T13:29:36,005 INFO [Time-limited test {}] server.Server(415): Started @17176ms 2024-12-09T13:29:36,101 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T13:29:36,179 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T13:29:36,181 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-09T13:29:36,319 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log 
Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-09T13:29:36,322 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T13:29:36,338 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-09T13:29:36,339 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T13:29:36,348 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T13:29:36,348 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T13:29:36,349 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T13:29:36,349 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T13:29:36,351 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@568cbbcc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop.log.dir/,AVAILABLE} 2024-12-09T13:29:36,351 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30e4067{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-09T13:29:36,438 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-09T13:29:36,438 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-09T13:29:36,438 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-09T13:29:36,439 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-09T13:29:36,454 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-09T13:29:36,460 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-09T13:29:36,558 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the 
scope "Singleton" 2024-12-09T13:29:36,563 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@57a5f2bc{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/java.io.tmpdir/jetty-localhost-38367-hadoop-yarn-common-3_4_1_jar-_-any-5289585438586300061/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-09T13:29:36,564 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4edc73e1{HTTP/1.1, (http/1.1)}{localhost:38367} 2024-12-09T13:29:36,565 INFO [Time-limited test {}] server.Server(415): Started @17735ms 2024-12-09T13:29:36,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-12-09T13:29:36,601 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:29:36,643 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=719, OpenFileDescriptor=778, MaxFileDescriptor=1048576, SystemLoadAverage=343, ProcessCount=11, AvailableMemoryMB=5920 2024-12-09T13:29:36,645 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=719 is superior to 500 2024-12-09T13:29:36,650 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T13:29:36,656 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is c48e4b8eb196,45049,1733750967073 2024-12-09T13:29:36,656 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7db8a531 2024-12-09T13:29:36,657 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T13:29:36,660 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35784, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T13:29:36,662 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T13:29:36,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:36,670 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T13:29:36,672 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:36,674 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSplitRegion" procId is: 7 2024-12-09T13:29:36,675 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T13:29:36,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T13:29:36,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741842_1018 (size=422) 2024-12-09T13:29:36,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741842_1018 (size=422) 2024-12-09T13:29:36,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741842_1018 (size=422) 2024-12-09T13:29:36,716 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 47e98b76f711545b77b78e05577538d0, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:29:36,720 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1d30f226bd6406aad18df926a90eb66c, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:29:36,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741844_1020 (size=83) 2024-12-09T13:29:36,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741844_1020 (size=83) 2024-12-09T13:29:36,752 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741844_1020 (size=83) 2024-12-09T13:29:36,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741843_1019 (size=83) 2024-12-09T13:29:36,754 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:29:36,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741843_1019 (size=83) 2024-12-09T13:29:36,754 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1722): Closing 1d30f226bd6406aad18df926a90eb66c, disabling compactions & flushes 2024-12-09T13:29:36,755 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c. 2024-12-09T13:29:36,755 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c. 2024-12-09T13:29:36,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741843_1019 (size=83) 2024-12-09T13:29:36,755 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c. after waiting 0 ms 2024-12-09T13:29:36,755 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c. 2024-12-09T13:29:36,755 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c. 
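
[Editor's note] The create request logged at 13:29:36,662 declares a single 'cf' family (VERSIONS=1, BLOOMFILTER=ROW, 64 KB blocks) and one split key, which is why RegionOpenAndInit instantiates two regions, (''..'1') and ('1'..''). The sketch below is a hedged client-side equivalent, not the test's own helper code; the class name CreateSplitTable and the bare ConnectionFactory.createConnection() call are assumptions.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateSplitTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)               // VERSIONS => '1'
                  .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                  .setBlocksize(64 * 1024)          // BLOCKSIZE => '65536 B (64KB)'
                  .build())
              .build();
          // One split key => two regions, matching the RegionOpenAndInit entries above.
          byte[][] splitKeys = { Bytes.toBytes("1") };
          admin.createTable(desc, splitKeys);
        }
      }
    }
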
2024-12-09T13:29:36,755 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 1d30f226bd6406aad18df926a90eb66c: Waiting for close lock at 1733750976754Disabling compacts and flushes for region at 1733750976754Disabling writes for close at 1733750976755 (+1 ms)Writing region close event to WAL at 1733750976755Closed at 1733750976755 2024-12-09T13:29:36,760 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:29:36,761 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 47e98b76f711545b77b78e05577538d0, disabling compactions & flushes 2024-12-09T13:29:36,761 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0. 2024-12-09T13:29:36,761 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0. 2024-12-09T13:29:36,761 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0. after waiting 0 ms 2024-12-09T13:29:36,761 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0. 2024-12-09T13:29:36,761 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0. 
2024-12-09T13:29:36,761 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 47e98b76f711545b77b78e05577538d0: Waiting for close lock at 1733750976761Disabling compacts and flushes for region at 1733750976761Disabling writes for close at 1733750976761Writing region close event to WAL at 1733750976761Closed at 1733750976761 2024-12-09T13:29:36,764 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T13:29:36,764 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733750976764"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733750976764"}]},"ts":"1733750976764"} 2024-12-09T13:29:36,765 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733750976764"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733750976764"}]},"ts":"1733750976764"} 2024-12-09T13:29:36,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T13:29:36,811 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-09T13:29:36,816 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T13:29:36,817 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733750976816"}]},"ts":"1733750976816"} 2024-12-09T13:29:36,825 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-09T13:29:36,825 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {c48e4b8eb196=0} racks are {/default-rack=0} 2024-12-09T13:29:36,829 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T13:29:36,829 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T13:29:36,829 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T13:29:36,829 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T13:29:36,829 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T13:29:36,829 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T13:29:36,829 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T13:29:36,829 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T13:29:36,829 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T13:29:36,829 DEBUG [PEWorker-2 {}] 
balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T13:29:36,830 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=47e98b76f711545b77b78e05577538d0, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1d30f226bd6406aad18df926a90eb66c, ASSIGN}] 2024-12-09T13:29:36,836 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1d30f226bd6406aad18df926a90eb66c, ASSIGN 2024-12-09T13:29:36,836 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=47e98b76f711545b77b78e05577538d0, ASSIGN 2024-12-09T13:29:36,838 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1d30f226bd6406aad18df926a90eb66c, ASSIGN; state=OFFLINE, location=c48e4b8eb196,36493,1733750968163; forceNewPlan=false, retain=false 2024-12-09T13:29:36,840 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=47e98b76f711545b77b78e05577538d0, ASSIGN; state=OFFLINE, location=c48e4b8eb196,39211,1733750968235; forceNewPlan=false, retain=false 2024-12-09T13:29:36,989 INFO [c48e4b8eb196:45049 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
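
[Editor's note] The BalancerClusterState / BaseLoadBalancer entries above show the master surveying the three region servers ("server N has 0 regions") before assigning the two new regions. A hedged way to look at the same per-server view from a client is via ClusterMetrics; this sketch (class name RegionCounts and the unqualified connection are assumptions) only prints region counts per live server:

    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.ServerMetrics;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RegionCounts {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          // ClusterMetrics exposes the per-server load the balancer reasons about.
          ClusterMetrics metrics = admin.getClusterMetrics();
          for (ServerName sn : metrics.getLiveServerMetrics().keySet()) {
            ServerMetrics sm = metrics.getLiveServerMetrics().get(sn);
            System.out.println(sn + " hosts " + sm.getRegionMetrics().size() + " regions");
          }
        }
      }
    }
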
2024-12-09T13:29:36,989 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=47e98b76f711545b77b78e05577538d0, regionState=OPENING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:29:36,989 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=1d30f226bd6406aad18df926a90eb66c, regionState=OPENING, regionLocation=c48e4b8eb196,36493,1733750968163 2024-12-09T13:29:36,996 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1d30f226bd6406aad18df926a90eb66c, ASSIGN because future has completed 2024-12-09T13:29:36,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T13:29:36,997 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1d30f226bd6406aad18df926a90eb66c, server=c48e4b8eb196,36493,1733750968163}] 2024-12-09T13:29:37,000 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=47e98b76f711545b77b78e05577538d0, ASSIGN because future has completed 2024-12-09T13:29:37,001 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 47e98b76f711545b77b78e05577538d0, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:29:37,158 DEBUG [RSProcedureDispatcher-pool-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T13:29:37,175 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0. 2024-12-09T13:29:37,176 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => 47e98b76f711545b77b78e05577538d0, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T13:29:37,176 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0. service=AccessControlService 2024-12-09T13:29:37,176 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T13:29:37,178 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 47e98b76f711545b77b78e05577538d0 2024-12-09T13:29:37,178 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:29:37,178 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for 47e98b76f711545b77b78e05577538d0 2024-12-09T13:29:37,178 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for 47e98b76f711545b77b78e05577538d0 2024-12-09T13:29:37,191 INFO [StoreOpener-47e98b76f711545b77b78e05577538d0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 47e98b76f711545b77b78e05577538d0 2024-12-09T13:29:37,194 INFO [StoreOpener-47e98b76f711545b77b78e05577538d0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 47e98b76f711545b77b78e05577538d0 columnFamilyName cf 2024-12-09T13:29:37,194 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39253, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T13:29:37,194 DEBUG [StoreOpener-47e98b76f711545b77b78e05577538d0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:37,196 INFO [StoreOpener-47e98b76f711545b77b78e05577538d0-1 {}] regionserver.HStore(327): Store=47e98b76f711545b77b78e05577538d0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:29:37,196 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for 47e98b76f711545b77b78e05577538d0 2024-12-09T13:29:37,197 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/47e98b76f711545b77b78e05577538d0 
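
[Editor's note] The store opener above shows StoreFileTrackerFactory instantiating DefaultStoreFileTracker, selected by the 'hbase.store.file-tracker.impl' => 'DEFAULT' metadata that the table descriptor carries in the create entries earlier. As a sketch of how that per-table key is set (the key string is taken verbatim from the log; the class name StoreFileTrackerChoice and the standalone descriptor are assumptions):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class StoreFileTrackerChoice {
      // Key copied from the descriptor logged above; "DEFAULT" selects
      // DefaultStoreFileTracker, the implementation the store opener reports.
      static final String SFT_KEY = "hbase.store.file-tracker.impl";

      public static TableDescriptor describe() {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .setValue(SFT_KEY, "DEFAULT")
            .build();
      }
    }
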
2024-12-09T13:29:37,198 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/47e98b76f711545b77b78e05577538d0 2024-12-09T13:29:37,199 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for 47e98b76f711545b77b78e05577538d0 2024-12-09T13:29:37,199 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for 47e98b76f711545b77b78e05577538d0 2024-12-09T13:29:37,200 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c. 2024-12-09T13:29:37,200 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 1d30f226bd6406aad18df926a90eb66c, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T13:29:37,201 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c. service=AccessControlService 2024-12-09T13:29:37,201 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T13:29:37,202 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 1d30f226bd6406aad18df926a90eb66c 2024-12-09T13:29:37,202 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:29:37,202 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 1d30f226bd6406aad18df926a90eb66c 2024-12-09T13:29:37,202 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 1d30f226bd6406aad18df926a90eb66c 2024-12-09T13:29:37,204 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for 47e98b76f711545b77b78e05577538d0 2024-12-09T13:29:37,204 INFO [StoreOpener-1d30f226bd6406aad18df926a90eb66c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1d30f226bd6406aad18df926a90eb66c 2024-12-09T13:29:37,206 INFO [StoreOpener-1d30f226bd6406aad18df926a90eb66c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1d30f226bd6406aad18df926a90eb66c columnFamilyName cf 2024-12-09T13:29:37,207 DEBUG [StoreOpener-1d30f226bd6406aad18df926a90eb66c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:37,207 INFO [StoreOpener-1d30f226bd6406aad18df926a90eb66c-1 {}] regionserver.HStore(327): Store=1d30f226bd6406aad18df926a90eb66c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:29:37,208 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 1d30f226bd6406aad18df926a90eb66c 2024-12-09T13:29:37,208 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/47e98b76f711545b77b78e05577538d0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:29:37,209 DEBUG 
[RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/1d30f226bd6406aad18df926a90eb66c 2024-12-09T13:29:37,209 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened 47e98b76f711545b77b78e05577538d0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63589366, jitterRate=-0.05244460701942444}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:29:37,209 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 47e98b76f711545b77b78e05577538d0 2024-12-09T13:29:37,210 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/1d30f226bd6406aad18df926a90eb66c 2024-12-09T13:29:37,210 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 1d30f226bd6406aad18df926a90eb66c 2024-12-09T13:29:37,210 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for 47e98b76f711545b77b78e05577538d0: Running coprocessor pre-open hook at 1733750977178Writing region info on filesystem at 1733750977179 (+1 ms)Initializing all the Stores at 1733750977182 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733750977182Cleaning up temporary data from old regions at 1733750977199 (+17 ms)Running coprocessor post-open hooks at 1733750977209 (+10 ms)Region opened successfully at 1733750977210 (+1 ms) 2024-12-09T13:29:37,210 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 1d30f226bd6406aad18df926a90eb66c 2024-12-09T13:29:37,212 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0., pid=11, masterSystemTime=1733750977158 2024-12-09T13:29:37,214 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 1d30f226bd6406aad18df926a90eb66c 2024-12-09T13:29:37,217 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0. 
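
[Editor's note] Once the "Post open deploy tasks" above finish and RegionStateStore marks each row OPEN with a regionLocation in hbase:meta, clients can resolve where the two regions landed. A minimal sketch using RegionLocator (class name ShowLocations and the bare connection are assumptions; the expected output mirrors the regionLocation values recorded above):

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ShowLocations {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection();
             RegionLocator locator = conn.getRegionLocator(tn)) {
          // Expect two entries, one per split region, each pointing at the
          // region server recorded as regionLocation in hbase:meta.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }
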
2024-12-09T13:29:37,217 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0. 2024-12-09T13:29:37,218 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=47e98b76f711545b77b78e05577538d0, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:29:37,221 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/1d30f226bd6406aad18df926a90eb66c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:29:37,222 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 47e98b76f711545b77b78e05577538d0, server=c48e4b8eb196,39211,1733750968235 because future has completed 2024-12-09T13:29:37,222 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened 1d30f226bd6406aad18df926a90eb66c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67982885, jitterRate=0.013023927807807922}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:29:37,222 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1d30f226bd6406aad18df926a90eb66c 2024-12-09T13:29:37,223 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 1d30f226bd6406aad18df926a90eb66c: Running coprocessor pre-open hook at 1733750977202Writing region info on filesystem at 1733750977202Initializing all the Stores at 1733750977204 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733750977204Cleaning up temporary data from old regions at 1733750977210 (+6 ms)Running coprocessor post-open hooks at 1733750977222 (+12 ms)Region opened successfully at 1733750977223 (+1 ms) 2024-12-09T13:29:37,224 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c., pid=10, masterSystemTime=1733750977158 2024-12-09T13:29:37,230 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=8 2024-12-09T13:29:37,230 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=1d30f226bd6406aad18df926a90eb66c, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,36493,1733750968163 2024-12-09T13:29:37,231 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure 
47e98b76f711545b77b78e05577538d0, server=c48e4b8eb196,39211,1733750968235 in 224 msec 2024-12-09T13:29:37,231 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c. 2024-12-09T13:29:37,231 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c. 2024-12-09T13:29:37,235 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=47e98b76f711545b77b78e05577538d0, ASSIGN in 400 msec 2024-12-09T13:29:37,236 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1d30f226bd6406aad18df926a90eb66c, server=c48e4b8eb196,36493,1733750968163 because future has completed 2024-12-09T13:29:37,243 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-09T13:29:37,243 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure 1d30f226bd6406aad18df926a90eb66c, server=c48e4b8eb196,36493,1733750968163 in 241 msec 2024-12-09T13:29:37,248 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-12-09T13:29:37,249 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1d30f226bd6406aad18df926a90eb66c, ASSIGN in 413 msec 2024-12-09T13:29:37,250 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T13:29:37,251 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733750977250"}]},"ts":"1733750977250"} 2024-12-09T13:29:37,254 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-09T13:29:37,257 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T13:29:37,261 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-09T13:29:37,274 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:29:37,280 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:29:37,280 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:29:37,281 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:29:37,282 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55075, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-12-09T13:29:37,286 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:29:37,289 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-09T13:29:37,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T13:29:37,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-09T13:29:37,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-09T13:29:37,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-09T13:29:37,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-09T13:29:37,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:37,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:37,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:29:37,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-09T13:29:37,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:37,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:37,379 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:29:37,379 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:29:37,380 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:29:37,380 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:29:37,383 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 716 msec 2024-12-09T13:29:37,485 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:29:37,485 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-12-09T13:29:37,486 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T13:29:37,486 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-09T13:29:37,488 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:29:37,488 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-12-09T13:29:37,489 DEBUG 
[HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T13:29:37,489 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-09T13:29:37,490 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:37,490 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-09T13:29:37,492 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-09T13:29:37,492 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-12-09T13:29:37,493 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:29:37,493 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-12-09T13:29:37,494 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-09T13:29:37,494 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-12-09T13:29:37,494 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T13:29:37,494 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-09T13:29:37,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T13:29:37,814 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-09T13:29:37,815 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithSplitRegion get assigned. 
Timeout = 60000ms 2024-12-09T13:29:37,816 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:29:37,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithSplitRegion assigned to meta. Checking AM states. 2024-12-09T13:29:37,825 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:29:37,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithSplitRegion assigned. 2024-12-09T13:29:37,830 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-09T13:29:37,847 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-09T13:29:37,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733750977847 (current time:1733750977847). 2024-12-09T13:29:37,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T13:29:37,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-09T13:29:37,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T13:29:37,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70975e5b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:29:37,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:29:37,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:29:37,852 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:29:37,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:29:37,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:29:37,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32bbe20d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:29:37,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:29:37,853 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:29:37,853 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:29:37,854 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35816, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:29:37,856 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8c81b6c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:29:37,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:29:37,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:29:37,859 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:29:37,861 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41182, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:29:37,863 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049. 
2024-12-09T13:29:37,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:29:37,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:29:37,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:29:37,870 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:29:37,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@551faa83, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:29:37,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:29:37,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:29:37,873 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:29:37,873 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:29:37,874 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:29:37,874 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4902fb3c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:29:37,874 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:29:37,874 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:29:37,875 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:29:37,876 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35838, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:29:37,877 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@375f7f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:29:37,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:29:37,878 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:29:37,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:29:37,880 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41186, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:29:37,883 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:29:37,887 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049. 
2024-12-09T13:29:37,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:29:37,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:29:37,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:29:37,888 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:29:37,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-09T13:29:37,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-09T13:29:37,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-09T13:29:37,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-09T13:29:37,904 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T13:29:37,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-09T13:29:37,912 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T13:29:37,927 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T13:29:37,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741845_1021 (size=215) 2024-12-09T13:29:37,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741845_1021 (size=215) 2024-12-09T13:29:37,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741845_1021 (size=215) 2024-12-09T13:29:37,943 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T13:29:37,946 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 47e98b76f711545b77b78e05577538d0}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1d30f226bd6406aad18df926a90eb66c}] 2024-12-09T13:29:37,951 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 47e98b76f711545b77b78e05577538d0 2024-12-09T13:29:37,952 INFO [PEWorker-2 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1d30f226bd6406aad18df926a90eb66c 2024-12-09T13:29:38,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-09T13:29:38,110 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36493 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-12-09T13:29:38,110 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39211 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-12-09T13:29:38,111 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0. 2024-12-09T13:29:38,111 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c. 2024-12-09T13:29:38,116 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 1d30f226bd6406aad18df926a90eb66c: 2024-12-09T13:29:38,116 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-09T13:29:38,116 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for 47e98b76f711545b77b78e05577538d0: 2024-12-09T13:29:38,116 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-09T13:29:38,117 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:38,117 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:38,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:29:38,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:29:38,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T13:29:38,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T13:29:38,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741847_1023 (size=86) 2024-12-09T13:29:38,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741847_1023 (size=86) 2024-12-09T13:29:38,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741847_1023 (size=86) 2024-12-09T13:29:38,141 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0. 2024-12-09T13:29:38,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741846_1022 (size=86) 2024-12-09T13:29:38,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741846_1022 (size=86) 2024-12-09T13:29:38,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741846_1022 (size=86) 2024-12-09T13:29:38,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-09T13:29:38,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c. 
2024-12-09T13:29:38,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-12-09T13:29:38,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-12-09T13:29:38,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-12-09T13:29:38,146 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 47e98b76f711545b77b78e05577538d0 2024-12-09T13:29:38,146 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 1d30f226bd6406aad18df926a90eb66c 2024-12-09T13:29:38,146 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1d30f226bd6406aad18df926a90eb66c 2024-12-09T13:29:38,146 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 47e98b76f711545b77b78e05577538d0 2024-12-09T13:29:38,150 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 47e98b76f711545b77b78e05577538d0 in 202 msec 2024-12-09T13:29:38,153 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=12 2024-12-09T13:29:38,153 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T13:29:38,153 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1d30f226bd6406aad18df926a90eb66c in 202 msec 2024-12-09T13:29:38,155 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T13:29:38,158 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T13:29:38,158 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:38,161 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:38,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741848_1024 (size=597) 2024-12-09T13:29:38,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741848_1024 (size=597) 2024-12-09T13:29:38,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741848_1024 (size=597) 2024-12-09T13:29:38,195 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T13:29:38,208 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T13:29:38,209 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:38,212 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T13:29:38,213 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-09T13:29:38,216 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 316 msec 2024-12-09T13:29:38,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-09T13:29:38,224 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-09T13:29:38,236 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='03936b7588c34ae5521a88e20f752835b', locateType=CURRENT is 
[region=testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:29:38,238 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='10e6d0b82ec31787e0da6b1652662facc', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c., hostname=c48e4b8eb196,36493,1733750968163, seqNum=2] 2024-12-09T13:29:38,244 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='3ccd576bfc60f5b5e8fd8f49575231682', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c., hostname=c48e4b8eb196,36493,1733750968163, seqNum=2] 2024-12-09T13:29:38,245 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='28eadb6ecec5423fe8d5740d039a670ef', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c., hostname=c48e4b8eb196,36493,1733750968163, seqNum=2] 2024-12-09T13:29:38,246 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:29:38,247 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='4560ff7b6ca7e9b2b7d56413eb2465cb4', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c., hostname=c48e4b8eb196,36493,1733750968163, seqNum=2] 2024-12-09T13:29:38,249 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='697ea124c0c8a4b7018e5a48e601d6ab0', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c., hostname=c48e4b8eb196,36493,1733750968163, seqNum=2] 2024-12-09T13:29:38,249 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42724, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:29:38,249 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='519d7a7d658a4d02ecd806377f6a46023', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c., hostname=c48e4b8eb196,36493,1733750968163, seqNum=2] 2024-12-09T13:29:38,251 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39211 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T13:29:38,253 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36493 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c. with WAL disabled. 
Data may be lost in the event of a crash. 2024-12-09T13:29:38,256 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-09T13:29:38,260 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:38,261 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0. 2024-12-09T13:29:38,262 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:29:38,264 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-09T13:29:38,278 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-09T13:29:38,289 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-09T13:29:38,295 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-09T13:29:38,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733750978295 (current time:1733750978295). 
2024-12-09T13:29:38,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T13:29:38,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-09T13:29:38,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T13:29:38,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40a8fff9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:29:38,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:29:38,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:29:38,298 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:29:38,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:29:38,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:29:38,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47ed5353, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:29:38,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:29:38,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:29:38,300 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:29:38,300 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35860, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:29:38,302 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36ef34b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:29:38,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:29:38,304 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:29:38,304 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:29:38,305 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41190, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:29:38,307 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049. 2024-12-09T13:29:38,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:29:38,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:29:38,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:29:38,307 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T13:29:38,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36aec58e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:29:38,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:29:38,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:29:38,309 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:29:38,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:29:38,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:29:38,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@726d572f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:29:38,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:29:38,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:29:38,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:29:38,312 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35866, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:29:38,313 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e372920, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:29:38,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:29:38,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:29:38,315 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:29:38,316 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41200, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-09T13:29:38,319 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:29:38,321 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049. 2024-12-09T13:29:38,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:29:38,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:29:38,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:29:38,321 INFO [Registry-endpoints-refresh-end-points 
{}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:29:38,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-09T13:29:38,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T13:29:38,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-09T13:29:38,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-09T13:29:38,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-09T13:29:38,326 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T13:29:38,328 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T13:29:38,332 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T13:29:38,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741849_1025 (size=210) 2024-12-09T13:29:38,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741849_1025 (size=210) 2024-12-09T13:29:38,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741849_1025 (size=210) 2024-12-09T13:29:38,345 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T13:29:38,346 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; 
SnapshotRegionProcedure 47e98b76f711545b77b78e05577538d0}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1d30f226bd6406aad18df926a90eb66c}] 2024-12-09T13:29:38,348 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 47e98b76f711545b77b78e05577538d0 2024-12-09T13:29:38,348 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1d30f226bd6406aad18df926a90eb66c 2024-12-09T13:29:38,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-09T13:29:38,501 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36493 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-12-09T13:29:38,501 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39211 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-12-09T13:29:38,502 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0. 2024-12-09T13:29:38,502 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c. 
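The SnapshotProcedure entries above (pid=15, snapshot snaptb0-testExportFileSystemStateWithSplitRegion, type=FLUSH) record the master-side execution of a snapshot request, with per-region SnapshotRegionProcedure children (pid=16, pid=17) dispatched to the region servers. As a hedged sketch only, not taken from this test's source and assuming standard client configuration, the corresponding client-side request via the Admin API would look roughly like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotRequestSketch {
      public static void main(String[] args) throws Exception {
        // Assumes hbase-site.xml for the target cluster is on the classpath.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // A FLUSH snapshot, matching "type=FLUSH" in the SnapshotProcedure entries;
          // the call blocks until the master-side procedure (pid=15 above) completes.
          admin.snapshot("snaptb0-testExportFileSystemStateWithSplitRegion",
              TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"),
              SnapshotType.FLUSH);
        }
      }
    }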
2024-12-09T13:29:38,507 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing 47e98b76f711545b77b78e05577538d0 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-09T13:29:38,508 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing 1d30f226bd6406aad18df926a90eb66c 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-09T13:29:38,584 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/1d30f226bd6406aad18df926a90eb66c/.tmp/cf/561f9d8a344640358eeb707acec3031d is 71, key is 13d0b01d51924f6c12f01aebb928e162/cf:q/1733750978252/Put/seqid=0 2024-12-09T13:29:38,584 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/47e98b76f711545b77b78e05577538d0/.tmp/cf/29af96df0ef3442fa669c7a19acff786 is 71, key is 02edf80815b692f736867fb50ba1a68f/cf:q/1733750978251/Put/seqid=0 2024-12-09T13:29:38,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741850_1026 (size=5286) 2024-12-09T13:29:38,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741850_1026 (size=5286) 2024-12-09T13:29:38,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741850_1026 (size=5286) 2024-12-09T13:29:38,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741851_1027 (size=8326) 2024-12-09T13:29:38,605 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/47e98b76f711545b77b78e05577538d0/.tmp/cf/29af96df0ef3442fa669c7a19acff786 2024-12-09T13:29:38,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741851_1027 (size=8326) 2024-12-09T13:29:38,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741851_1027 (size=8326) 2024-12-09T13:29:38,609 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/1d30f226bd6406aad18df926a90eb66c/.tmp/cf/561f9d8a344640358eeb707acec3031d 2024-12-09T13:29:38,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see 
if procedure is done pid=15 2024-12-09T13:29:38,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/1d30f226bd6406aad18df926a90eb66c/.tmp/cf/561f9d8a344640358eeb707acec3031d as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/1d30f226bd6406aad18df926a90eb66c/cf/561f9d8a344640358eeb707acec3031d 2024-12-09T13:29:38,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/47e98b76f711545b77b78e05577538d0/.tmp/cf/29af96df0ef3442fa669c7a19acff786 as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/47e98b76f711545b77b78e05577538d0/cf/29af96df0ef3442fa669c7a19acff786 2024-12-09T13:29:38,694 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/1d30f226bd6406aad18df926a90eb66c/cf/561f9d8a344640358eeb707acec3031d, entries=47, sequenceid=6, filesize=8.1 K 2024-12-09T13:29:38,694 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/47e98b76f711545b77b78e05577538d0/cf/29af96df0ef3442fa669c7a19acff786, entries=3, sequenceid=6, filesize=5.2 K 2024-12-09T13:29:38,702 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 1d30f226bd6406aad18df926a90eb66c in 195ms, sequenceid=6, compaction requested=false 2024-12-09T13:29:38,702 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 47e98b76f711545b77b78e05577538d0 in 195ms, sequenceid=6, compaction requested=false 2024-12-09T13:29:38,702 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-12-09T13:29:38,702 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-12-09T13:29:38,703 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for 1d30f226bd6406aad18df926a90eb66c: 2024-12-09T13:29:38,703 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for 47e98b76f711545b77b78e05577538d0: 2024-12-09T13:29:38,703 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-09T13:29:38,703 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-09T13:29:38,703 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:38,703 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:38,703 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:29:38,703 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:29:38,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/47e98b76f711545b77b78e05577538d0/cf/29af96df0ef3442fa669c7a19acff786] hfiles 2024-12-09T13:29:38,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/1d30f226bd6406aad18df926a90eb66c/cf/561f9d8a344640358eeb707acec3031d] hfiles 2024-12-09T13:29:38,706 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/1d30f226bd6406aad18df926a90eb66c/cf/561f9d8a344640358eeb707acec3031d for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:38,706 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/47e98b76f711545b77b78e05577538d0/cf/29af96df0ef3442fa669c7a19acff786 for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:38,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741852_1028 (size=125) 2024-12-09T13:29:38,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741852_1028 (size=125) 2024-12-09T13:29:38,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741852_1028 (size=125) 2024-12-09T13:29:38,730 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0. 2024-12-09T13:29:38,731 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-12-09T13:29:38,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-12-09T13:29:38,731 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 47e98b76f711545b77b78e05577538d0 2024-12-09T13:29:38,732 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 47e98b76f711545b77b78e05577538d0 2024-12-09T13:29:38,736 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 47e98b76f711545b77b78e05577538d0 in 388 msec 2024-12-09T13:29:38,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741853_1029 (size=125) 2024-12-09T13:29:38,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741853_1029 (size=125) 2024-12-09T13:29:38,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741853_1029 (size=125) 2024-12-09T13:29:38,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c. 
2024-12-09T13:29:38,739 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-09T13:29:38,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-12-09T13:29:38,739 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 1d30f226bd6406aad18df926a90eb66c 2024-12-09T13:29:38,740 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1d30f226bd6406aad18df926a90eb66c 2024-12-09T13:29:38,745 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=15 2024-12-09T13:29:38,745 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T13:29:38,745 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1d30f226bd6406aad18df926a90eb66c in 396 msec 2024-12-09T13:29:38,746 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T13:29:38,748 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T13:29:38,748 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:38,749 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:38,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741854_1030 (size=675) 2024-12-09T13:29:38,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741854_1030 (size=675) 2024-12-09T13:29:38,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741854_1030 (size=675) 2024-12-09T13:29:38,770 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T13:29:38,780 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T13:29:38,781 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:38,783 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T13:29:38,783 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-09T13:29:38,786 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 460 msec 2024-12-09T13:29:38,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-09T13:29:38,954 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-09T13:29:38,979 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T13:29:38,980 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T13:29:38,981 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T13:29:38,983 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41212, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T13:29:38,985 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39211 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-09T13:29:38,986 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42734, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T13:29:38,986 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57198, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T13:29:38,987 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36493 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-09T13:29:38,987 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44683 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-09T13:29:38,990 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportFileSystemStateWithSplitRegion', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T13:29:38,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:38,996 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T13:29:38,996 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:38,996 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportFileSystemStateWithSplitRegion" procId is: 18 2024-12-09T13:29:38,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-09T13:29:38,998 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T13:29:39,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741855_1031 (size=390) 2024-12-09T13:29:39,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741855_1031 (size=390) 2024-12-09T13:29:39,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741855_1031 (size=390) 2024-12-09T13:29:39,043 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7a88374f70350ec93398ce71231ae080, NAME => 'testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:29:39,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741856_1032 (size=75) 2024-12-09T13:29:39,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741856_1032 (size=75) 2024-12-09T13:29:39,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741856_1032 (size=75) 2024-12-09T13:29:39,095 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:29:39,095 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 7a88374f70350ec93398ce71231ae080, disabling compactions & flushes 2024-12-09T13:29:39,096 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080. 2024-12-09T13:29:39,096 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080. 2024-12-09T13:29:39,096 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080. after waiting 0 ms 2024-12-09T13:29:39,096 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080. 2024-12-09T13:29:39,096 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080. 
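The create request logged by HMaster above prints the full table descriptor for 'testExportFileSystemStateWithSplitRegion' (a single 'cf' family with VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 64KB and otherwise default attributes). A hedged sketch of issuing an equivalent request with the client-side descriptor builders, assuming standard client configuration; this is an illustration, not the test's own code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        // Assumes hbase-site.xml for the target cluster is on the classpath.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Single 'cf' family; VERSIONS => '1' and BLOCKSIZE => 64KB are set explicitly,
          // the remaining attributes in the logged descriptor match the builder defaults.
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testExportFileSystemStateWithSplitRegion"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .setBlocksize(64 * 1024)
                  .build())
              .build());
        }
      }
    }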
2024-12-09T13:29:39,096 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7a88374f70350ec93398ce71231ae080: Waiting for close lock at 1733750979095Disabling compacts and flushes for region at 1733750979095Disabling writes for close at 1733750979096 (+1 ms)Writing region close event to WAL at 1733750979096Closed at 1733750979096 2024-12-09T13:29:39,098 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T13:29:39,099 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733750979098"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733750979098"}]},"ts":"1733750979098"} 2024-12-09T13:29:39,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-09T13:29:39,106 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-09T13:29:39,108 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T13:29:39,108 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733750979108"}]},"ts":"1733750979108"} 2024-12-09T13:29:39,112 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-09T13:29:39,112 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {c48e4b8eb196=0} racks are {/default-rack=0} 2024-12-09T13:29:39,114 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T13:29:39,114 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T13:29:39,114 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T13:29:39,114 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T13:29:39,114 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T13:29:39,114 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T13:29:39,114 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T13:29:39,114 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T13:29:39,114 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T13:29:39,114 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T13:29:39,114 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=7a88374f70350ec93398ce71231ae080, ASSIGN}] 
2024-12-09T13:29:39,116 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=7a88374f70350ec93398ce71231ae080, ASSIGN 2024-12-09T13:29:39,118 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=7a88374f70350ec93398ce71231ae080, ASSIGN; state=OFFLINE, location=c48e4b8eb196,44683,1733750968041; forceNewPlan=false, retain=false 2024-12-09T13:29:39,268 INFO [c48e4b8eb196:45049 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-09T13:29:39,269 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=7a88374f70350ec93398ce71231ae080, regionState=OPENING, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:29:39,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=7a88374f70350ec93398ce71231ae080, ASSIGN because future has completed 2024-12-09T13:29:39,274 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7a88374f70350ec93398ce71231ae080, server=c48e4b8eb196,44683,1733750968041}] 2024-12-09T13:29:39,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-09T13:29:39,429 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T13:29:39,475 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42789, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T13:29:39,490 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080. 2024-12-09T13:29:39,490 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7752): Opening region: {ENCODED => 7a88374f70350ec93398ce71231ae080, NAME => 'testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080.', STARTKEY => '', ENDKEY => ''} 2024-12-09T13:29:39,491 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080. service=AccessControlService 2024-12-09T13:29:39,491 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T13:29:39,491 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 7a88374f70350ec93398ce71231ae080 2024-12-09T13:29:39,491 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:29:39,492 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7794): checking encryption for 7a88374f70350ec93398ce71231ae080 2024-12-09T13:29:39,492 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7797): checking classloading for 7a88374f70350ec93398ce71231ae080 2024-12-09T13:29:39,498 INFO [StoreOpener-7a88374f70350ec93398ce71231ae080-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7a88374f70350ec93398ce71231ae080 2024-12-09T13:29:39,502 INFO [StoreOpener-7a88374f70350ec93398ce71231ae080-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7a88374f70350ec93398ce71231ae080 columnFamilyName cf 2024-12-09T13:29:39,502 DEBUG [StoreOpener-7a88374f70350ec93398ce71231ae080-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:39,503 INFO [StoreOpener-7a88374f70350ec93398ce71231ae080-1 {}] regionserver.HStore(327): Store=7a88374f70350ec93398ce71231ae080/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:29:39,504 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1038): replaying wal for 7a88374f70350ec93398ce71231ae080 2024-12-09T13:29:39,505 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/7a88374f70350ec93398ce71231ae080 2024-12-09T13:29:39,506 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/7a88374f70350ec93398ce71231ae080 2024-12-09T13:29:39,507 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1048): stopping wal replay for 7a88374f70350ec93398ce71231ae080 2024-12-09T13:29:39,507 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1060): Cleaning up temporary data for 7a88374f70350ec93398ce71231ae080 2024-12-09T13:29:39,513 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1093): writing seq id for 7a88374f70350ec93398ce71231ae080 2024-12-09T13:29:39,544 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/7a88374f70350ec93398ce71231ae080/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:29:39,546 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1114): Opened 7a88374f70350ec93398ce71231ae080; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70171058, jitterRate=0.04563024640083313}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:29:39,546 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7a88374f70350ec93398ce71231ae080 2024-12-09T13:29:39,547 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1006): Region open journal for 7a88374f70350ec93398ce71231ae080: Running coprocessor pre-open hook at 1733750979492Writing region info on filesystem at 1733750979492Initializing all the Stores at 1733750979497 (+5 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733750979498 (+1 ms)Cleaning up temporary data from old regions at 1733750979507 (+9 ms)Running coprocessor post-open hooks at 1733750979546 (+39 ms)Region opened successfully at 1733750979547 (+1 ms) 2024-12-09T13:29:39,549 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080., pid=20, masterSystemTime=1733750979428 2024-12-09T13:29:39,552 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080. 2024-12-09T13:29:39,552 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080. 
2024-12-09T13:29:39,553 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=7a88374f70350ec93398ce71231ae080, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:29:39,557 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7a88374f70350ec93398ce71231ae080, server=c48e4b8eb196,44683,1733750968041 because future has completed 2024-12-09T13:29:39,564 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=19 2024-12-09T13:29:39,564 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; OpenRegionProcedure 7a88374f70350ec93398ce71231ae080, server=c48e4b8eb196,44683,1733750968041 in 286 msec 2024-12-09T13:29:39,573 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-12-09T13:29:39,573 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=7a88374f70350ec93398ce71231ae080, ASSIGN in 450 msec 2024-12-09T13:29:39,574 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T13:29:39,575 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733750979575"}]},"ts":"1733750979575"} 2024-12-09T13:29:39,578 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-09T13:29:39,580 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T13:29:39,580 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-09T13:29:39,586 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-09T13:29:39,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-09T13:29:39,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:29:39,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:29:39,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/acl 2024-12-09T13:29:39,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:29:39,666 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:29:39,667 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:29:39,667 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:29:39,667 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:29:39,667 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:29:39,668 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:29:39,668 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:29:39,668 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:29:39,670 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion in 676 msec 2024-12-09T13:29:40,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-09T13:29:40,133 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-09T13:29:40,134 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so 
can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T13:29:40,138 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T13:29:42,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741857_1033 (size=134217728) 2024-12-09T13:29:42,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741857_1033 (size=134217728) 2024-12-09T13:29:42,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741857_1033 (size=134217728) 2024-12-09T13:29:42,178 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportFileSystemStateWithSplitRegion' 2024-12-09T13:29:42,733 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T13:29:44,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741858_1034 (size=134217728) 2024-12-09T13:29:44,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741858_1034 (size=134217728) 2024-12-09T13:29:44,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741858_1034 (size=134217728) 2024-12-09T13:29:45,009 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/output/cf/test_file is 35, key is 1\x00\x00\x00/cf:q/1733750980143/Put/seqid=0 2024-12-09T13:29:45,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741859_1035 (size=51979256) 2024-12-09T13:29:45,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741859_1035 (size=51979256) 2024-12-09T13:29:45,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741859_1035 (size=51979256) 2024-12-09T13:29:45,021 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e3ac57a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:29:45,021 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:29:45,022 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:29:45,024 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:29:45,024 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.RpcConnection(424): process preamble call response with 
response type GetConnectionRegistryResponse 2024-12-09T13:29:45,025 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:29:45,025 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9f5cd53, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:29:45,025 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:29:45,026 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:29:45,026 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:29:45,028 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50916, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:29:45,029 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25addc6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:29:45,030 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:29:45,032 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:29:45,033 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:29:45,035 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46626, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:29:45,052 WARN [Time-limited test {}] tool.BulkLoadHFilesTool$1(330): Trying to bulk load hfile hdfs://localhost:45819/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 
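The BulkLoadHFilesTool warning above concerns a roughly 320 MB HFile staged under an output directory laid out with one subdirectory per column family (cf/test_file). A hedged sketch of driving the same bulk-load tool programmatically; the staging path below is hypothetical and the cluster configuration is assumed to be on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

    public class BulkLoadSketch {
      public static void main(String[] args) throws Exception {
        // Assumes hbase-site.xml for the target cluster is on the classpath.
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical staging directory; like the "output" directory in the log it must
        // contain one subdirectory per column family (here "cf") holding the HFiles.
        Path stagingDir = new Path("hdfs://namenode:8020/tmp/bulkload-output");
        BulkLoadHFiles.create(conf)
            .bulkLoad(TableName.valueOf("testExportFileSystemStateWithSplitRegion"), stagingDir);
      }
    }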
2024-12-09T13:29:45,052 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T13:29:45,054 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.AsyncConnectionImpl(321): The fetched master address is c48e4b8eb196,45049,1733750967073 2024-12-09T13:29:45,054 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@72fe469 2024-12-09T13:29:45,054 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T13:29:45,057 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50922, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T13:29:45,065 WARN [IPC Server handler 4 on default port 45819 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-09T13:29:45,072 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080., hostname=c48e4b8eb196,44683,1733750968041, seqNum=2] 2024-12-09T13:29:45,077 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:29:45,080 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49728, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:29:45,085 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-09T13:29:45,105 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:45819/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/output/cf/test_file first=Optional[1\x00\x00\x00] last=Optional[9\x00\x00\x00] 2024-12-09T13:29:45,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:29:45,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:29:45,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:29:45,139 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58973, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-12-09T13:29:45,140 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39211 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at 
org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-09T13:29:45,145 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39211 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: ExecService size: 101 connection: 172.17.0.2:58973 deadline: 1733751045139, exception=org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 2024-12-09T13:29:45,175 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683 {}] regionserver.SecureBulkLoadManager(227): unable to add token java.util.concurrent.ExecutionException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:396) ~[?:?] at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073) ~[?:?] at org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager.secureBulkLoadHFiles(SecureBulkLoadManager.java:221) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.bulkLoadHFile(RSRpcServices.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43510) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] Caused by: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.RegionCoprocessorRpcChannelImpl.lambda$rpcCall$0(RegionCoprocessorRpcChannelImpl.java:90) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T13:29:45,178 WARN [IPC Server handler 4 on default port 45819 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-09T13:29:45,207 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:45819/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/output/cf/test_file for inclusion in 7a88374f70350ec93398ce71231ae080/cf 2024-12-09T13:29:45,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683 {}] regionserver.HStore(626): HFile bounds: first=1\x00\x00\x00 last=9\x00\x00\x00 2024-12-09T13:29:45,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683 {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-09T13:29:45,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683 {}] regionserver.HStore(641): Trying to bulk load hfile hdfs://localhost:45819/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-12-09T13:29:45,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683 {}] regionserver.HRegion(2603): Flush status journal for 7a88374f70350ec93398ce71231ae080: 2024-12-09T13:29:45,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:45819/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/output/cf/test_file to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/staging/jenkins__testExportFileSystemStateWithSplitRegion__so9c00utuhdt8h4vqf91npfmk6mfr4bpge1db9j1smfn09s00cj02ttpe2am35cl/cf/test_file 2024-12-09T13:29:45,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/staging/jenkins__testExportFileSystemStateWithSplitRegion__so9c00utuhdt8h4vqf91npfmk6mfr4bpge1db9j1smfn09s00cj02ttpe2am35cl/cf/test_file as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/7a88374f70350ec93398ce71231ae080/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_ 2024-12-09T13:29:45,239 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/staging/jenkins__testExportFileSystemStateWithSplitRegion__so9c00utuhdt8h4vqf91npfmk6mfr4bpge1db9j1smfn09s00cj02ttpe2am35cl/cf/test_file into 7a88374f70350ec93398ce71231ae080/cf as 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/7a88374f70350ec93398ce71231ae080/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_ - updating store file list. 2024-12-09T13:29:45,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 297b6e3d5cf24e119b169d5abb242636_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-09T13:29:45,273 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/7a88374f70350ec93398ce71231ae080/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_ into 7a88374f70350ec93398ce71231ae080/cf 2024-12-09T13:29:45,273 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/staging/jenkins__testExportFileSystemStateWithSplitRegion__so9c00utuhdt8h4vqf91npfmk6mfr4bpge1db9j1smfn09s00cj02ttpe2am35cl/cf/test_file into 7a88374f70350ec93398ce71231ae080/cf (new location: hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/7a88374f70350ec93398ce71231ae080/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_) 2024-12-09T13:29:45,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/staging/jenkins__testExportFileSystemStateWithSplitRegion__so9c00utuhdt8h4vqf91npfmk6mfr4bpge1db9j1smfn09s00cj02ttpe2am35cl/cf/test_file 2024-12-09T13:29:45,317 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T13:29:45,317 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.bulkLoad(BulkLoadHFilesTool.java:1125) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.run(BulkLoadHFilesTool.java:1176) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemStateWithSplitRegion(TestExportSnapshot.java:229) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T13:29:45,317 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:29:45,318 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:29:45,318 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T13:29:45,319 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100
2024-12-09T13:29:45,319 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080., hostname=c48e4b8eb196,44683,1733750968041, seqNum=2 , the old value is region=testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080., hostname=c48e4b8eb196,44683,1733750968041, seqNum=2, error=org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Call to address=c48e4b8eb196:44683 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed
2024-12-09T13:29:45,320 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080., hostname=c48e4b8eb196,44683,1733750968041, seqNum=2 is org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed
2024-12-09T13:29:45,320 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080., hostname=c48e4b8eb196,44683,1733750968041, seqNum=2 from cache
2024-12-09T13:29:45,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683 {}] ipc.CallRunner(93): RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683: skipped callId: 7 service: ClientService methodName: CleanupBulkLoad size: 336 connection: 172.17.0.2:49728 deadline: 1733751045318 param: TODO: class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$CleanupBulkLoadRequest connection: 172.17.0.2:49728
2024-12-09T13:29:45,329 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='5', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080., hostname=c48e4b8eb196,44683,1733750968041, seqNum=2]
2024-12-09T13:29:45,341 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster$3(2313): Client=jenkins//172.17.0.2 split testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080.
2024-12-09T13:29:45,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=c48e4b8eb196,44683,1733750968041
2024-12-09T13:29:45,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=7a88374f70350ec93398ce71231ae080, daughterA=31220c1b1827b53b6a58d3a59622a0f0, daughterB=829531b2168b02ea6a230665bf08db6a
2024-12-09T13:29:45,358 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=7a88374f70350ec93398ce71231ae080, daughterA=31220c1b1827b53b6a58d3a59622a0f0, daughterB=829531b2168b02ea6a230665bf08db6a
2024-12-09T13:29:45,358 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=7a88374f70350ec93398ce71231ae080, daughterA=31220c1b1827b53b6a58d3a59622a0f0, daughterB=829531b2168b02ea6a230665bf08db6a
2024-12-09T13:29:45,358 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=7a88374f70350ec93398ce71231ae080, daughterA=31220c1b1827b53b6a58d3a59622a0f0, daughterB=829531b2168b02ea6a230665bf08db6a
2024-12-09T13:29:45,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21
2024-12-09T13:29:45,367 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=7a88374f70350ec93398ce71231ae080, UNASSIGN}]
2024-12-09T13:29:45,370 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=7a88374f70350ec93398ce71231ae080, UNASSIGN
2024-12-09T13:29:45,372 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=7a88374f70350ec93398ce71231ae080, regionState=CLOSING, regionLocation=c48e4b8eb196,44683,1733750968041
2024-12-09T13:29:45,375 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45049 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=c48e4b8eb196,44683,1733750968041, table=testExportFileSystemStateWithSplitRegion, region=7a88374f70350ec93398ce71231ae080. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now.
2024-12-09T13:29:45,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=7a88374f70350ec93398ce71231ae080, UNASSIGN because future has completed 2024-12-09T13:29:45,378 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-09T13:29:45,378 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7a88374f70350ec93398ce71231ae080, server=c48e4b8eb196,44683,1733750968041}] 2024-12-09T13:29:45,424 WARN [Async-Client-Retry-Timer-pool-0 {}] client.AsyncNonMetaRegionLocator(265): Failed to locate region in 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c48e4b8eb196:39211 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.scan(ClientProtos.java:43851) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.callOpenScanner(AsyncClientScanner.java:177) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) 
~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:242) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead(ConnectionUtils.java:442) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:255) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.start(AsyncClientScanner.java:275) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:617) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:91) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.locateInMeta(AsyncNonMetaRegionLocator.java:408) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocationsInternal(AsyncNonMetaRegionLocator.java:516) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocations(AsyncNonMetaRegionLocator.java:529) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.lambda$getRegionLocation$7(AsyncRegionLocator.java:164) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.tracedLocationFuture(AsyncRegionLocator.java:106) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:158) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:193) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.lambda$tryScheduleRetry$1(AsyncRpcRetryingCaller.java:139) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.run(HashedWheelTimer.java:713) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.ImmediateExecutor.execute(ImmediateExecutor.java:34) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.expire(HashedWheelTimer.java:701) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelBucket.expireTimeouts(HashedWheelTimer.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 34 more 2024-12-09T13:29:45,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-09T13:29:45,541 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close 7a88374f70350ec93398ce71231ae080 2024-12-09T13:29:45,541 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-09T13:29:45,543 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing 7a88374f70350ec93398ce71231ae080, disabling compactions & flushes 2024-12-09T13:29:45,543 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080. 2024-12-09T13:29:45,543 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080. 2024-12-09T13:29:45,543 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080. after waiting 0 ms 2024-12-09T13:29:45,543 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080. 
2024-12-09T13:29:45,551 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/7a88374f70350ec93398ce71231ae080/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=1
2024-12-09T13:29:45,556 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-09T13:29:45,556 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080.
2024-12-09T13:29:45,556 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for 7a88374f70350ec93398ce71231ae080: Waiting for close lock at 1733750985542Running coprocessor pre-close hooks at 1733750985542Disabling compacts and flushes for region at 1733750985542Disabling writes for close at 1733750985543 (+1 ms)Writing region close event to WAL at 1733750985545 (+2 ms)Running coprocessor post-close hooks at 1733750985553 (+8 ms)Closed at 1733750985556 (+3 ms)
2024-12-09T13:29:45,563 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed 7a88374f70350ec93398ce71231ae080
2024-12-09T13:29:45,564 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=7a88374f70350ec93398ce71231ae080, regionState=CLOSED
2024-12-09T13:29:45,571 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7a88374f70350ec93398ce71231ae080, server=c48e4b8eb196,44683,1733750968041 because future has completed
2024-12-09T13:29:45,579 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=22
2024-12-09T13:29:45,583 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=22, state=SUCCESS, hasLock=false; CloseRegionProcedure 7a88374f70350ec93398ce71231ae080, server=c48e4b8eb196,44683,1733750968041 in 195 msec
2024-12-09T13:29:45,587 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=21
2024-12-09T13:29:45,587 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=7a88374f70350ec93398ce71231ae080, UNASSIGN in 212 msec
2024-12-09T13:29:45,605 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T13:29:45,612 INFO [PEWorker-2 {}] assignment.SplitTableRegionProcedure(728): pid=21 splitting 1 storefiles, region=7a88374f70350ec93398ce71231ae080, threads=1
2024-12-09T13:29:45,620 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=21 splitting started for store file: hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/7a88374f70350ec93398ce71231ae080/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_ for region: 7a88374f70350ec93398ce71231ae080
2024-12-09T13:29:45,648 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 297b6e3d5cf24e119b169d5abb242636_SeqId_4_: NONE, but ROW specified in column family configuration
2024-12-09T13:29:45,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21
2024-12-09T13:29:45,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21
2024-12-09T13:29:46,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741860_1036 (size=21)
2024-12-09T13:29:46,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741860_1036 (size=21)
2024-12-09T13:29:46,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741860_1036 (size=21)
2024-12-09T13:29:46,340 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 297b6e3d5cf24e119b169d5abb242636_SeqId_4_: NONE, but ROW specified in column family configuration
2024-12-09T13:29:46,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741861_1037 (size=21)
2024-12-09T13:29:46,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741861_1037 (size=21)
2024-12-09T13:29:46,408 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=21 splitting complete for store file: hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/7a88374f70350ec93398ce71231ae080/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_ for region: 7a88374f70350ec93398ce71231ae080
2024-12-09T13:29:46,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741861_1037 (size=21)
2024-12-09T13:29:46,412 DEBUG [PEWorker-2 {}] assignment.SplitTableRegionProcedure(802): pid=21 split storefiles for region 7a88374f70350ec93398ce71231ae080 Daughter A: [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/31220c1b1827b53b6a58d3a59622a0f0/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_.7a88374f70350ec93398ce71231ae080] storefiles, Daughter B: [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/829531b2168b02ea6a230665bf08db6a/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_.7a88374f70350ec93398ce71231ae080] storefiles.
2024-12-09T13:29:46,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741862_1038 (size=76) 2024-12-09T13:29:46,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741862_1038 (size=76) 2024-12-09T13:29:46,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741862_1038 (size=76) 2024-12-09T13:29:46,497 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:46,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-09T13:29:46,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741863_1039 (size=76) 2024-12-09T13:29:46,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741863_1039 (size=76) 2024-12-09T13:29:46,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741863_1039 (size=76) 2024-12-09T13:29:46,548 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:46,592 DEBUG [PEWorker-2 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/31220c1b1827b53b6a58d3a59622a0f0/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-09T13:29:46,616 DEBUG [PEWorker-2 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/829531b2168b02ea6a230665bf08db6a/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-09T13:29:46,621 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733750986620"},{"qualifier":"splitA","vlen":75,"tag":[],"timestamp":"1733750986620"},{"qualifier":"splitB","vlen":75,"tag":[],"timestamp":"1733750986620"}]},"ts":"1733750986620"} 2024-12-09T13:29:46,622 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733750985348.31220c1b1827b53b6a58d3a59622a0f0.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733750986620"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733750986620"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733750986620"}]},"ts":"1733750986620"} 2024-12-09T13:29:46,622 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,5,1733750985348.829531b2168b02ea6a230665bf08db6a.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733750986620"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733750986620"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733750986620"}]},"ts":"1733750986620"} 2024-12-09T13:29:46,648 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=31220c1b1827b53b6a58d3a59622a0f0, ASSIGN}, {pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=829531b2168b02ea6a230665bf08db6a, ASSIGN}] 2024-12-09T13:29:46,652 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=31220c1b1827b53b6a58d3a59622a0f0, ASSIGN 2024-12-09T13:29:46,655 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=31220c1b1827b53b6a58d3a59622a0f0, ASSIGN; state=SPLITTING_NEW, location=c48e4b8eb196,44683,1733750968041; forceNewPlan=false, retain=false 2024-12-09T13:29:46,659 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=829531b2168b02ea6a230665bf08db6a, ASSIGN 2024-12-09T13:29:46,661 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=829531b2168b02ea6a230665bf08db6a, ASSIGN; state=SPLITTING_NEW, location=c48e4b8eb196,44683,1733750968041; forceNewPlan=false, retain=false 2024-12-09T13:29:46,805 INFO [c48e4b8eb196:45049 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-09T13:29:46,806 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=31220c1b1827b53b6a58d3a59622a0f0, regionState=OPENING, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:29:46,806 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=829531b2168b02ea6a230665bf08db6a, regionState=OPENING, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:29:46,809 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=31220c1b1827b53b6a58d3a59622a0f0, ASSIGN because future has completed 2024-12-09T13:29:46,809 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure 31220c1b1827b53b6a58d3a59622a0f0, server=c48e4b8eb196,44683,1733750968041}] 2024-12-09T13:29:46,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=829531b2168b02ea6a230665bf08db6a, ASSIGN because future has completed 2024-12-09T13:29:46,811 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=27, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure 829531b2168b02ea6a230665bf08db6a, server=c48e4b8eb196,44683,1733750968041}] 2024-12-09T13:29:46,976 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733750985348.31220c1b1827b53b6a58d3a59622a0f0. 2024-12-09T13:29:46,976 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7752): Opening region: {ENCODED => 31220c1b1827b53b6a58d3a59622a0f0, NAME => 'testExportFileSystemStateWithSplitRegion,,1733750985348.31220c1b1827b53b6a58d3a59622a0f0.', STARTKEY => '', ENDKEY => '5'} 2024-12-09T13:29:46,977 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733750985348.31220c1b1827b53b6a58d3a59622a0f0. service=AccessControlService 2024-12-09T13:29:46,977 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T13:29:46,977 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 31220c1b1827b53b6a58d3a59622a0f0 2024-12-09T13:29:46,978 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733750985348.31220c1b1827b53b6a58d3a59622a0f0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:29:46,978 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7794): checking encryption for 31220c1b1827b53b6a58d3a59622a0f0 2024-12-09T13:29:46,978 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7797): checking classloading for 31220c1b1827b53b6a58d3a59622a0f0 2024-12-09T13:29:46,991 INFO [StoreOpener-31220c1b1827b53b6a58d3a59622a0f0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 31220c1b1827b53b6a58d3a59622a0f0 2024-12-09T13:29:46,994 INFO [StoreOpener-31220c1b1827b53b6a58d3a59622a0f0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 31220c1b1827b53b6a58d3a59622a0f0 columnFamilyName cf 2024-12-09T13:29:46,994 DEBUG [StoreOpener-31220c1b1827b53b6a58d3a59622a0f0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:47,018 DEBUG [StoreFileOpener-31220c1b1827b53b6a58d3a59622a0f0-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 297b6e3d5cf24e119b169d5abb242636_SeqId_4_.7a88374f70350ec93398ce71231ae080: NONE, but ROW specified in column family configuration 2024-12-09T13:29:47,050 DEBUG [StoreOpener-31220c1b1827b53b6a58d3a59622a0f0-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/31220c1b1827b53b6a58d3a59622a0f0/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_.7a88374f70350ec93398ce71231ae080->hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/7a88374f70350ec93398ce71231ae080/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_-bottom 2024-12-09T13:29:47,051 INFO [StoreOpener-31220c1b1827b53b6a58d3a59622a0f0-1 {}] regionserver.HStore(327): Store=31220c1b1827b53b6a58d3a59622a0f0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:29:47,052 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1038): replaying wal for 31220c1b1827b53b6a58d3a59622a0f0 2024-12-09T13:29:47,053 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/31220c1b1827b53b6a58d3a59622a0f0 2024-12-09T13:29:47,056 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/31220c1b1827b53b6a58d3a59622a0f0 2024-12-09T13:29:47,057 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1048): stopping wal replay for 31220c1b1827b53b6a58d3a59622a0f0 2024-12-09T13:29:47,057 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1060): Cleaning up temporary data for 31220c1b1827b53b6a58d3a59622a0f0 2024-12-09T13:29:47,065 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1093): writing seq id for 31220c1b1827b53b6a58d3a59622a0f0 2024-12-09T13:29:47,066 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1114): Opened 31220c1b1827b53b6a58d3a59622a0f0; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69952913, jitterRate=0.04237963259220123}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:29:47,067 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 31220c1b1827b53b6a58d3a59622a0f0 2024-12-09T13:29:47,068 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1006): Region open journal for 31220c1b1827b53b6a58d3a59622a0f0: Running coprocessor pre-open hook at 1733750986978Writing region info on filesystem at 1733750986978Initializing all the Stores at 1733750986980 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733750986980Cleaning up temporary data from old regions at 1733750987057 (+77 ms)Running coprocessor post-open hooks at 1733750987067 (+10 ms)Region opened successfully at 1733750987068 (+1 ms) 2024-12-09T13:29:47,069 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733750985348.31220c1b1827b53b6a58d3a59622a0f0., pid=26, masterSystemTime=1733750986964 2024-12-09T13:29:47,070 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] 
regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,,1733750985348.31220c1b1827b53b6a58d3a59622a0f0.,because compaction is disabled. 2024-12-09T13:29:47,073 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733750985348.31220c1b1827b53b6a58d3a59622a0f0. 2024-12-09T13:29:47,073 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733750985348.31220c1b1827b53b6a58d3a59622a0f0. 2024-12-09T13:29:47,074 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,5,1733750985348.829531b2168b02ea6a230665bf08db6a. 2024-12-09T13:29:47,074 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7752): Opening region: {ENCODED => 829531b2168b02ea6a230665bf08db6a, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733750985348.829531b2168b02ea6a230665bf08db6a.', STARTKEY => '5', ENDKEY => ''} 2024-12-09T13:29:47,075 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,5,1733750985348.829531b2168b02ea6a230665bf08db6a. service=AccessControlService 2024-12-09T13:29:47,075 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=31220c1b1827b53b6a58d3a59622a0f0, regionState=OPEN, openSeqNum=7, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:29:47,075 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T13:29:47,075 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 829531b2168b02ea6a230665bf08db6a 2024-12-09T13:29:47,075 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,5,1733750985348.829531b2168b02ea6a230665bf08db6a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:29:47,075 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7794): checking encryption for 829531b2168b02ea6a230665bf08db6a 2024-12-09T13:29:47,075 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7797): checking classloading for 829531b2168b02ea6a230665bf08db6a 2024-12-09T13:29:47,078 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure 31220c1b1827b53b6a58d3a59622a0f0, server=c48e4b8eb196,44683,1733750968041 because future has completed 2024-12-09T13:29:47,080 INFO [StoreOpener-829531b2168b02ea6a230665bf08db6a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 829531b2168b02ea6a230665bf08db6a 2024-12-09T13:29:47,087 INFO [StoreOpener-829531b2168b02ea6a230665bf08db6a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 829531b2168b02ea6a230665bf08db6a columnFamilyName cf 2024-12-09T13:29:47,087 DEBUG [StoreOpener-829531b2168b02ea6a230665bf08db6a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:47,095 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=26, resume processing ppid=24 2024-12-09T13:29:47,096 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=24, state=SUCCESS, hasLock=false; OpenRegionProcedure 31220c1b1827b53b6a58d3a59622a0f0, server=c48e4b8eb196,44683,1733750968041 in 272 msec 2024-12-09T13:29:47,098 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=31220c1b1827b53b6a58d3a59622a0f0, ASSIGN in 446 msec 2024-12-09T13:29:47,117 DEBUG [StoreFileOpener-829531b2168b02ea6a230665bf08db6a-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom 
filter type for 297b6e3d5cf24e119b169d5abb242636_SeqId_4_.7a88374f70350ec93398ce71231ae080: NONE, but ROW specified in column family configuration 2024-12-09T13:29:47,119 DEBUG [StoreOpener-829531b2168b02ea6a230665bf08db6a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/829531b2168b02ea6a230665bf08db6a/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_.7a88374f70350ec93398ce71231ae080->hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/7a88374f70350ec93398ce71231ae080/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_-top 2024-12-09T13:29:47,120 INFO [StoreOpener-829531b2168b02ea6a230665bf08db6a-1 {}] regionserver.HStore(327): Store=829531b2168b02ea6a230665bf08db6a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:29:47,120 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1038): replaying wal for 829531b2168b02ea6a230665bf08db6a 2024-12-09T13:29:47,121 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/829531b2168b02ea6a230665bf08db6a 2024-12-09T13:29:47,123 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/829531b2168b02ea6a230665bf08db6a 2024-12-09T13:29:47,124 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1048): stopping wal replay for 829531b2168b02ea6a230665bf08db6a 2024-12-09T13:29:47,124 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1060): Cleaning up temporary data for 829531b2168b02ea6a230665bf08db6a 2024-12-09T13:29:47,126 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1093): writing seq id for 829531b2168b02ea6a230665bf08db6a 2024-12-09T13:29:47,128 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1114): Opened 829531b2168b02ea6a230665bf08db6a; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69752335, jitterRate=0.03939078748226166}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:29:47,128 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 829531b2168b02ea6a230665bf08db6a 2024-12-09T13:29:47,128 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1006): Region open journal for 829531b2168b02ea6a230665bf08db6a: Running coprocessor pre-open hook at 1733750987076Writing region info on filesystem at 1733750987076Initializing all the Stores at 1733750987078 (+2 ms)Instantiating store for column 
family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733750987078Cleaning up temporary data from old regions at 1733750987124 (+46 ms)Running coprocessor post-open hooks at 1733750987128 (+4 ms)Region opened successfully at 1733750987128 2024-12-09T13:29:47,129 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,5,1733750985348.829531b2168b02ea6a230665bf08db6a., pid=27, masterSystemTime=1733750986964 2024-12-09T13:29:47,129 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,5,1733750985348.829531b2168b02ea6a230665bf08db6a.,because compaction is disabled. 2024-12-09T13:29:47,131 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,5,1733750985348.829531b2168b02ea6a230665bf08db6a. 2024-12-09T13:29:47,132 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,5,1733750985348.829531b2168b02ea6a230665bf08db6a. 2024-12-09T13:29:47,133 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=829531b2168b02ea6a230665bf08db6a, regionState=OPEN, openSeqNum=7, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:29:47,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure 829531b2168b02ea6a230665bf08db6a, server=c48e4b8eb196,44683,1733750968041 because future has completed 2024-12-09T13:29:47,141 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=27, resume processing ppid=25 2024-12-09T13:29:47,142 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=25, state=SUCCESS, hasLock=false; OpenRegionProcedure 829531b2168b02ea6a230665bf08db6a, server=c48e4b8eb196,44683,1733750968041 in 326 msec 2024-12-09T13:29:47,144 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=25, resume processing ppid=21 2024-12-09T13:29:47,144 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=829531b2168b02ea6a230665bf08db6a, ASSIGN in 493 msec 2024-12-09T13:29:47,147 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=7a88374f70350ec93398ce71231ae080, daughterA=31220c1b1827b53b6a58d3a59622a0f0, daughterB=829531b2168b02ea6a230665bf08db6a in 1.7940 sec 2024-12-09T13:29:47,484 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 
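At this point SplitTableRegionProcedure pid=21 has finished and both daughter regions are OPEN on c48e4b8eb196,44683. Purely as an illustrative sketch (not part of the logged test), a client could confirm the resulting region layout as follows; only the table name is taken from the log:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ListDaughterRegions {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          List<RegionInfo> regions =
              admin.getRegions(TableName.valueOf("testExportFileSystemStateWithSplitRegion"));
          // After the split, expect two regions: ['', '5') and ['5', '').
          for (RegionInfo ri : regions) {
            System.out.println(ri.getEncodedName() + " ["
                + Bytes.toStringBinary(ri.getStartKey()) + ", "
                + Bytes.toStringBinary(ri.getEndKey()) + ")");
          }
        }
      }
    }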
2024-12-09T13:29:47,484 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-09T13:29:47,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-09T13:29:47,514 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-09T13:29:47,514 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SPLIT_REGION, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-09T13:29:47,519 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-09T13:29:47,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733750987519 (current time:1733750987519). 2024-12-09T13:29:47,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T13:29:47,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-09T13:29:47,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T13:29:47,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@413c09a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:29:47,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:29:47,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:29:47,521 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:29:47,521 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:29:47,521 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:29:47,521 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35f9e2b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:29:47,521 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:29:47,522 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:29:47,522 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:29:47,523 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50950, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:29:47,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@775f820a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:29:47,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:29:47,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:29:47,525 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:29:47,526 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46628, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:29:47,527 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049. 
2024-12-09T13:29:47,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:29:47,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:29:47,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:29:47,528 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:29:47,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d007403, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:29:47,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:29:47,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:29:47,530 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:29:47,530 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:29:47,530 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:29:47,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77b1d0c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:29:47,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:29:47,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:29:47,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:29:47,532 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50966, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:29:47,532 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57338aa5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:29:47,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:29:47,534 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:29:47,534 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:29:47,536 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46632, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:29:47,538 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:29:47,540 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049. 
2024-12-09T13:29:47,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:29:47,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:29:47,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:29:47,541 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:29:47,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-09T13:29:47,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
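The "Call stack" DEBUG records above are routine close-tracing from AsyncConnectionImpl while SnapshotDescriptionUtils copies the table ACL (jenkins: RWXCA) into the snapshot description; they do not indicate a failure. For reference only, a hypothetical way to inspect such table permissions from a client (assumed code, not from the test):

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class ShowTableAcls {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          List<UserPermission> perms =
              AccessControlClient.getUserPermissions(conn, "testExportFileSystemStateWithSplitRegion");
          // Expect an entry equivalent to the "jenkins: RWXCA" ACL read in the log.
          perms.forEach(p -> System.out.println(p));
        }
      }
    }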
2024-12-09T13:29:47,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-09T13:29:47,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-09T13:29:47,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-09T13:29:47,545 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T13:29:47,547 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T13:29:47,551 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T13:29:47,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741864_1040 (size=197) 2024-12-09T13:29:47,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741864_1040 (size=197) 2024-12-09T13:29:47,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741864_1040 (size=197) 2024-12-09T13:29:47,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-09T13:29:47,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-09T13:29:47,964 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T13:29:47,965 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 31220c1b1827b53b6a58d3a59622a0f0}, {pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 829531b2168b02ea6a230665bf08db6a}] 
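pid=28 above is the master-side SnapshotProcedure for snapshot-testExportFileSystemStateWithSplitRegion (type=FLUSH, ttl=0). Client-side, this whole flow is typically driven by one blocking Admin.snapshot call; a minimal sketch assuming default configuration (not the test's literal code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the snapshot completes; the repeated
          // "Checking to see if procedure is done pid=28" records are the
          // client polling the master for exactly this kind of completion.
          admin.snapshot("snapshot-testExportFileSystemStateWithSplitRegion",
              TableName.valueOf("testExportFileSystemStateWithSplitRegion"));
        }
      }
    }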
2024-12-09T13:29:47,967 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 31220c1b1827b53b6a58d3a59622a0f0 2024-12-09T13:29:47,968 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 829531b2168b02ea6a230665bf08db6a 2024-12-09T13:29:48,120 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44683 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=29 2024-12-09T13:29:48,121 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44683 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=30 2024-12-09T13:29:48,121 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,,1733750985348.31220c1b1827b53b6a58d3a59622a0f0. 2024-12-09T13:29:48,121 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.HRegion(2603): Flush status journal for 31220c1b1827b53b6a58d3a59622a0f0: 2024-12-09T13:29:48,121 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,,1733750985348.31220c1b1827b53b6a58d3a59622a0f0. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-12-09T13:29:48,121 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733750985348.829531b2168b02ea6a230665bf08db6a. 2024-12-09T13:29:48,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.HRegion(2603): Flush status journal for 829531b2168b02ea6a230665bf08db6a: 2024-12-09T13:29:48,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,,1733750985348.31220c1b1827b53b6a58d3a59622a0f0.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:48,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,5,1733750985348.829531b2168b02ea6a230665bf08db6a. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-12-09T13:29:48,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:29:48,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,5,1733750985348.829531b2168b02ea6a230665bf08db6a.' 
region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:48,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:29:48,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/31220c1b1827b53b6a58d3a59622a0f0/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_.7a88374f70350ec93398ce71231ae080->hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/7a88374f70350ec93398ce71231ae080/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_-bottom] hfiles 2024-12-09T13:29:48,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/31220c1b1827b53b6a58d3a59622a0f0/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_.7a88374f70350ec93398ce71231ae080 for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:48,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/829531b2168b02ea6a230665bf08db6a/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_.7a88374f70350ec93398ce71231ae080->hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/7a88374f70350ec93398ce71231ae080/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_-top] hfiles 2024-12-09T13:29:48,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/829531b2168b02ea6a230665bf08db6a/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_.7a88374f70350ec93398ce71231ae080 for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:48,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741865_1041 (size=182) 2024-12-09T13:29:48,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741865_1041 (size=182) 2024-12-09T13:29:48,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741865_1041 (size=182) 2024-12-09T13:29:48,139 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,,1733750985348.31220c1b1827b53b6a58d3a59622a0f0. 
2024-12-09T13:29:48,140 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-09T13:29:48,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=29 2024-12-09T13:29:48,140 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region 31220c1b1827b53b6a58d3a59622a0f0 2024-12-09T13:29:48,140 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 31220c1b1827b53b6a58d3a59622a0f0 2024-12-09T13:29:48,143 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 31220c1b1827b53b6a58d3a59622a0f0 in 176 msec 2024-12-09T13:29:48,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741866_1042 (size=182) 2024-12-09T13:29:48,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741866_1042 (size=182) 2024-12-09T13:29:48,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741866_1042 (size=182) 2024-12-09T13:29:48,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733750985348.829531b2168b02ea6a230665bf08db6a. 2024-12-09T13:29:48,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=30 2024-12-09T13:29:48,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=30 2024-12-09T13:29:48,146 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region 829531b2168b02ea6a230665bf08db6a 2024-12-09T13:29:48,146 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 829531b2168b02ea6a230665bf08db6a 2024-12-09T13:29:48,150 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=30, resume processing ppid=28 2024-12-09T13:29:48,150 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T13:29:48,150 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 829531b2168b02ea6a230665bf08db6a in 182 msec 2024-12-09T13:29:48,152 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
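Once the snapshot manifest is consolidated, verified, and moved out of .hbase-snapshot/.tmp in the records that follow, the test exports it to a second HDFS location (the "HDFS export destination path" lines further below). A hypothetical stand-alone invocation of the same ExportSnapshot tool, reusing the snapshot name and destination path from the log, might look like:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copies the snapshot manifest plus the referenced hfiles to the target file system.
        int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snapshot-testExportFileSystemStateWithSplitRegion",
            "-copy-to", "hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733750988683",
            "-overwrite" });
        System.exit(exitCode);
      }
    }

ExportSnapshot normally performs the copy with a MapReduce job; the TableMapReduceUtil "using jar" records near the end of this excerpt are that job's classpath being staged.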
2024-12-09T13:29:48,152 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-09T13:29:48,152 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:29:48,154 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/7a88374f70350ec93398ce71231ae080/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_] hfiles 2024-12-09T13:29:48,154 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/7a88374f70350ec93398ce71231ae080/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_ 2024-12-09T13:29:48,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741867_1043 (size=129) 2024-12-09T13:29:48,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741867_1043 (size=129) 2024-12-09T13:29:48,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741867_1043 (size=129) 2024-12-09T13:29:48,165 INFO [SplitRegionsSnapshotPool-pool-0 {}] procedure.SnapshotProcedure$1(378): take snapshot region={ENCODED => 7a88374f70350ec93398ce71231ae080, NAME => 'testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080.', STARTKEY => '', ENDKEY => '', OFFLINE => true, SPLIT => true}, table=testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:48,166 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T13:29:48,167 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T13:29:48,167 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:48,168 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:48,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-09T13:29:48,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741868_1044 (size=891) 2024-12-09T13:29:48,181 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741868_1044 (size=891) 2024-12-09T13:29:48,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741868_1044 (size=891) 2024-12-09T13:29:48,186 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T13:29:48,194 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T13:29:48,195 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:48,198 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T13:29:48,198 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-09T13:29:48,202 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 656 msec 2024-12-09T13:29:48,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-09T13:29:48,683 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-09T13:29:48,684 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733750988683 2024-12-09T13:29:48,684 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:45819, tgtDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733750988683, rawTgtDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733750988683, srcFsUri=hdfs://localhost:45819, 
srcDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:29:48,717 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:45819, inputRoot=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:29:48,717 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733750988683, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733750988683/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:48,721 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-09T13:29:48,727 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733750988683/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-09T13:29:48,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741869_1045 (size=197) 2024-12-09T13:29:48,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741869_1045 (size=197) 2024-12-09T13:29:48,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741869_1045 (size=197) 2024-12-09T13:29:48,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741870_1046 (size=891) 2024-12-09T13:29:48,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741870_1046 (size=891) 2024-12-09T13:29:48,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741870_1046 (size=891) 2024-12-09T13:29:48,765 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:29:48,766 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:29:48,766 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:29:49,736 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop-16837832945097001641.jar 2024-12-09T13:29:49,736 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:29:49,736 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:29:49,794 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop-16793374060964423113.jar 2024-12-09T13:29:49,795 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:29:49,795 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:29:49,795 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:29:49,796 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:29:49,796 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:29:49,796 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:29:49,797 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-09T13:29:49,797 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-09T13:29:49,797 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-09T13:29:49,797 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-09T13:29:49,798 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-09T13:29:49,798 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-09T13:29:49,798 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-09T13:29:49,798 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-09T13:29:49,799 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-09T13:29:49,799 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-09T13:29:49,799 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-09T13:29:49,801 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:29:49,802 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:29:49,802 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:29:49,802 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:29:49,802 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:29:49,802 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:29:49,803 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:29:49,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741871_1047 (size=6425017) 2024-12-09T13:29:49,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741871_1047 (size=6425017) 2024-12-09T13:29:49,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741871_1047 (size=6425017) 2024-12-09T13:29:49,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741872_1048 (size=5175431) 2024-12-09T13:29:49,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741872_1048 (size=5175431) 2024-12-09T13:29:49,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741872_1048 (size=5175431) 2024-12-09T13:29:49,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741873_1049 (size=1877034) 2024-12-09T13:29:49,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741873_1049 (size=1877034) 2024-12-09T13:29:49,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741873_1049 (size=1877034) 2024-12-09T13:29:50,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741874_1050 (size=20406) 2024-12-09T13:29:50,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741874_1050 (size=20406) 2024-12-09T13:29:50,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741874_1050 
(size=20406) 2024-12-09T13:29:50,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741875_1051 (size=24096) 2024-12-09T13:29:50,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741875_1051 (size=24096) 2024-12-09T13:29:50,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741875_1051 (size=24096) 2024-12-09T13:29:50,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741876_1052 (size=503880) 2024-12-09T13:29:50,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741876_1052 (size=503880) 2024-12-09T13:29:50,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741876_1052 (size=503880) 2024-12-09T13:29:50,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741877_1053 (size=4695811) 2024-12-09T13:29:50,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741877_1053 (size=4695811) 2024-12-09T13:29:50,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741877_1053 (size=4695811) 2024-12-09T13:29:50,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741878_1054 (size=111872) 2024-12-09T13:29:50,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741878_1054 (size=111872) 2024-12-09T13:29:50,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741878_1054 (size=111872) 2024-12-09T13:29:50,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741879_1055 (size=1597213) 2024-12-09T13:29:50,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741879_1055 (size=1597213) 2024-12-09T13:29:50,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741879_1055 (size=1597213) 2024-12-09T13:29:50,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741880_1056 (size=4188619) 2024-12-09T13:29:50,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741880_1056 (size=4188619) 2024-12-09T13:29:50,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741880_1056 (size=4188619) 2024-12-09T13:29:50,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741881_1057 (size=127628) 2024-12-09T13:29:50,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to 
blk_1073741881_1057 (size=127628) 2024-12-09T13:29:50,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741881_1057 (size=127628) 2024-12-09T13:29:50,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741882_1058 (size=29229) 2024-12-09T13:29:50,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741882_1058 (size=29229) 2024-12-09T13:29:50,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741882_1058 (size=29229) 2024-12-09T13:29:50,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741883_1059 (size=45609) 2024-12-09T13:29:50,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741883_1059 (size=45609) 2024-12-09T13:29:50,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741883_1059 (size=45609) 2024-12-09T13:29:50,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741884_1060 (size=8360360) 2024-12-09T13:29:50,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741884_1060 (size=8360360) 2024-12-09T13:29:50,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741884_1060 (size=8360360) 2024-12-09T13:29:50,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741885_1061 (size=1323991) 2024-12-09T13:29:50,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741885_1061 (size=1323991) 2024-12-09T13:29:50,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741885_1061 (size=1323991) 2024-12-09T13:29:50,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741886_1062 (size=232957) 2024-12-09T13:29:50,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741886_1062 (size=232957) 2024-12-09T13:29:50,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741886_1062 (size=232957) 2024-12-09T13:29:50,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741887_1063 (size=217634) 2024-12-09T13:29:50,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741887_1063 (size=217634) 2024-12-09T13:29:50,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741887_1063 (size=217634) 2024-12-09T13:29:50,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is 
added to blk_1073741888_1064 (size=903930) 2024-12-09T13:29:50,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741888_1064 (size=903930) 2024-12-09T13:29:50,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741888_1064 (size=903930) 2024-12-09T13:29:50,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741889_1065 (size=77835) 2024-12-09T13:29:50,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741889_1065 (size=77835) 2024-12-09T13:29:50,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741889_1065 (size=77835) 2024-12-09T13:29:50,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741890_1066 (size=443172) 2024-12-09T13:29:50,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741890_1066 (size=443172) 2024-12-09T13:29:50,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741890_1066 (size=443172) 2024-12-09T13:29:50,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741891_1067 (size=1832290) 2024-12-09T13:29:50,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741891_1067 (size=1832290) 2024-12-09T13:29:50,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741891_1067 (size=1832290) 2024-12-09T13:29:50,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741892_1068 (size=30949) 2024-12-09T13:29:50,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741892_1068 (size=30949) 2024-12-09T13:29:50,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741892_1068 (size=30949) 2024-12-09T13:29:50,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741893_1069 (size=136454) 2024-12-09T13:29:50,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741893_1069 (size=136454) 2024-12-09T13:29:50,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741893_1069 (size=136454) 2024-12-09T13:29:50,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741894_1070 (size=322274) 2024-12-09T13:29:50,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741894_1070 (size=322274) 2024-12-09T13:29:50,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 
is added to blk_1073741894_1070 (size=322274) 2024-12-09T13:29:50,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741895_1071 (size=131440) 2024-12-09T13:29:50,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741895_1071 (size=131440) 2024-12-09T13:29:50,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741895_1071 (size=131440) 2024-12-09T13:29:50,398 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-09T13:29:50,404 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snapshot-testExportFileSystemStateWithSplitRegion' hfile list 2024-12-09T13:29:50,410 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=7a88374f70350ec93398ce71231ae080-297b6e3d5cf24e119b169d5abb242636_SeqId_4_. 2024-12-09T13:29:50,410 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=7a88374f70350ec93398ce71231ae080-297b6e3d5cf24e119b169d5abb242636_SeqId_4_. 2024-12-09T13:29:50,411 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=305.6 M 2024-12-09T13:29:50,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741896_1072 (size=244) 2024-12-09T13:29:50,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741896_1072 (size=244) 2024-12-09T13:29:50,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741896_1072 (size=244) 2024-12-09T13:29:50,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741897_1073 (size=17) 2024-12-09T13:29:50,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741897_1073 (size=17) 2024-12-09T13:29:50,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741897_1073 (size=17) 2024-12-09T13:29:50,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741898_1074 (size=304160) 2024-12-09T13:29:50,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741898_1074 (size=304160) 2024-12-09T13:29:50,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741898_1074 (size=304160) 2024-12-09T13:29:50,920 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-09T13:29:50,921 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T13:29:51,059 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T13:29:51,208 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0001_000001 (auth:SIMPLE) from 127.0.0.1:54208 2024-12-09T13:29:56,019 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T13:29:58,282 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0001_000001 (auth:SIMPLE) from 127.0.0.1:55924 2024-12-09T13:29:58,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741899_1075 (size=349858) 2024-12-09T13:29:58,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741899_1075 (size=349858) 2024-12-09T13:29:58,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741899_1075 (size=349858) 2024-12-09T13:30:00,534 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0001_000001 (auth:SIMPLE) from 127.0.0.1:33554 2024-12-09T13:30:11,802 INFO [master/c48e4b8eb196:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-09T13:30:11,802 INFO [master/c48e4b8eb196:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-09T13:30:22,178 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 47e98b76f711545b77b78e05577538d0, had cached 0 bytes from a total of 5286 2024-12-09T13:30:22,202 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1d30f226bd6406aad18df926a90eb66c, had cached 0 bytes from a total of 8326 2024-12-09T13:30:26,019 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
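The records above cover the full snapshot-and-export setup: the SnapshotProcedure (pid=28) finishes, ExportSnapshot copies the snapshot manifest into the destination's .hbase-snapshot/.tmp directory, TableMapReduceUtil stages the HBase/Hadoop dependency jars, and the copy job is submitted to the mini YARN cluster. As a rough client-side sketch (not the test's own code), the same two steps can be driven with the public Admin API and the ExportSnapshot tool; the destination URI below is an illustrative placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class SnapshotAndExport {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");

    // 1) Take a flush-based snapshot through the Admin API
    //    (the master runs this as a SnapshotProcedure, as in the records above).
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.snapshot("snapshot-testExportFileSystemStateWithSplitRegion", table);
    }

    // 2) Export the snapshot to another HDFS location with the ExportSnapshot tool,
    //    which internally submits the MapReduce copy job seen above.
    //    The -copy-to URI is an illustrative placeholder.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snapshot-testExportFileSystemStateWithSplitRegion",
        "-copy-to", "hdfs://namenode:8020/some/export/dir",
        "-overwrite"
    });
    System.exit(rc);
  }
}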
2024-12-09T13:30:30,960 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1d30f226bd6406aad18df926a90eb66c changed from -1.0 to 0.0, refreshing cache 2024-12-09T13:30:30,961 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 96d9654456b57311f15f8834c0cf2325 changed from -1.0 to 0.0, refreshing cache 2024-12-09T13:30:30,961 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 47e98b76f711545b77b78e05577538d0 changed from -1.0 to 0.0, refreshing cache 2024-12-09T13:30:31,977 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 31220c1b1827b53b6a58d3a59622a0f0, had cached 0 bytes from a total of 320414712 2024-12-09T13:30:32,075 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 829531b2168b02ea6a230665bf08db6a, had cached 0 bytes from a total of 320414712 2024-12-09T13:30:37,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741900_1076 (size=134217728) 2024-12-09T13:30:37,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741900_1076 (size=134217728) 2024-12-09T13:30:37,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741900_1076 (size=134217728) 2024-12-09T13:30:56,020 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-09T13:31:04,219 INFO [regionserver/c48e4b8eb196:0.Chore.1 {}] hbase.ScheduledChore(145): Chore: MemstoreFlusherChore missed its start time 2024-12-09T13:31:04,221 WARN [DataXceiver for client DFSClient_attempt_1733750974572_0001_m_000000_0_-1969763237_1 at /127.0.0.1:37752 [Receiving block BP-67561561-172.17.0.2-1733750962175:blk_1073741901_1077] {}] datanode.BlockReceiver(626): Slow BlockReceiver write packet to mirror took 1139ms (threshold=300ms), downstream DNs=[127.0.0.1:39425, 127.0.0.1:42277], blockId=1073741901, seqno=3871 2024-12-09T13:31:04,223 INFO [regionserver/c48e4b8eb196:0.Chore.1 {}] hbase.ScheduledChore(145): Chore: MemstoreFlusherChore missed its start time 2024-12-09T13:31:07,178 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 47e98b76f711545b77b78e05577538d0, had cached 0 bytes from a total of 5286 2024-12-09T13:31:07,202 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1d30f226bd6406aad18df926a90eb66c, had cached 0 bytes from a total of 8326 2024-12-09T13:31:08,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741901_1077 (size=134217728) 2024-12-09T13:31:08,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741901_1077 (size=134217728) 2024-12-09T13:31:08,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741901_1077 (size=134217728) 2024-12-09T13:31:16,978 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 31220c1b1827b53b6a58d3a59622a0f0, had cached 0 bytes from a total of 320414712 2024-12-09T13:31:17,076 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 829531b2168b02ea6a230665bf08db6a, had cached 0 bytes from a total of 320414712 2024-12-09T13:31:19,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741902_1078 (size=51979256) 2024-12-09T13:31:19,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741902_1078 (size=51979256) 2024-12-09T13:31:19,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741902_1078 (size=51979256) 2024-12-09T13:31:19,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741903_1079 (size=17520) 2024-12-09T13:31:19,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741903_1079 (size=17520) 2024-12-09T13:31:19,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741903_1079 (size=17520) 2024-12-09T13:31:19,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741904_1080 (size=482) 2024-12-09T13:31:19,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741904_1080 (size=482) 2024-12-09T13:31:19,812 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741904_1080 (size=482) 2024-12-09T13:31:19,844 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_0/usercache/jenkins/appcache/application_1733750974572_0001/container_1733750974572_0001_01_000002/launch_container.sh] 2024-12-09T13:31:19,844 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_0/usercache/jenkins/appcache/application_1733750974572_0001/container_1733750974572_0001_01_000002/container_tokens] 2024-12-09T13:31:19,844 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_0/usercache/jenkins/appcache/application_1733750974572_0001/container_1733750974572_0001_01_000002/sysfs] 2024-12-09T13:31:19,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741905_1081 (size=17520) 2024-12-09T13:31:19,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741905_1081 (size=17520) 2024-12-09T13:31:19,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741905_1081 (size=17520) 2024-12-09T13:31:19,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741906_1082 (size=349858) 2024-12-09T13:31:19,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741906_1082 (size=349858) 2024-12-09T13:31:19,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741906_1082 (size=349858) 2024-12-09T13:31:19,871 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0001_000001 (auth:SIMPLE) from 127.0.0.1:60114 2024-12-09T13:31:21,346 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-09T13:31:21,348 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
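Judging from the block records above, the lone map task copies a single HFile of 320,414,712 bytes: two full 134,217,728-byte (128 MiB) blocks plus a 51,979,256-byte tail block, which matches the "export split=0 size=305.6 M" reported earlier. A small sketch for confirming that layout with the Hadoop FileSystem API, under the assumption that the copied file's destination path is known (the path below is a hypothetical placeholder):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class InspectCopiedHFile {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:45819"), conf);

    // Hypothetical path to the copied HFile on the export destination;
    // substitute the real location of 297b6e3d5cf24e119b169d5abb242636_SeqId_4_.
    Path copied = new Path("/path/to/exported/hfile");

    FileStatus st = fs.getFileStatus(copied);
    System.out.println("length=" + st.getLen());   // expect 320414712 for this run

    // Expect three blocks: 134217728 + 134217728 + 51979256 = 320414712 bytes (~305.6 MiB).
    for (BlockLocation bl : fs.getFileBlockLocations(st, 0, st.getLen())) {
      System.out.println("offset=" + bl.getOffset() + " length=" + bl.getLength());
    }
  }
}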
2024-12-09T13:31:21,355 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snapshot-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,355 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-09T13:31:21,356 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-09T13:31:21,356 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,357 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-12-09T13:31:21,357 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-12-09T13:31:21,357 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733750988683/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733750988683/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,358 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733750988683/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-12-09T13:31:21,358 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733750988683/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-12-09T13:31:21,373 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=31, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-09T13:31:21,382 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751081382"}]},"ts":"1733751081382"} 2024-12-09T13:31:21,384 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 
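The TestExportSnapshot records above list both the source and the exported snapshot directories and find .snapshotinfo and data.manifest in each. A comparable recursive walk of the exported snapshot directory with the plain Hadoop FileSystem API, using the URIs that appear in those records, might look like:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListExportedSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode URI and snapshot directory as they appear in the records above.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:45819"), conf);
    Path snapshotDir = new Path(
        "/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/"
        + "export-1733750988683/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion");

    // Recursively list everything under the exported snapshot directory
    // (expecting at least .snapshotinfo and data.manifest).
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(snapshotDir, true);
    while (it.hasNext()) {
      System.out.println(it.next().getPath());
    }
  }
}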
2024-12-09T13:31:21,385 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-09T13:31:21,387 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion}] 2024-12-09T13:31:21,394 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=31220c1b1827b53b6a58d3a59622a0f0, UNASSIGN}, {pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=829531b2168b02ea6a230665bf08db6a, UNASSIGN}] 2024-12-09T13:31:21,395 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=829531b2168b02ea6a230665bf08db6a, UNASSIGN 2024-12-09T13:31:21,395 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=31220c1b1827b53b6a58d3a59622a0f0, UNASSIGN 2024-12-09T13:31:21,399 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=829531b2168b02ea6a230665bf08db6a, regionState=CLOSING, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:31:21,399 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=31220c1b1827b53b6a58d3a59622a0f0, regionState=CLOSING, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:31:21,402 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=829531b2168b02ea6a230665bf08db6a, UNASSIGN because future has completed 2024-12-09T13:31:21,402 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:31:21,403 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 829531b2168b02ea6a230665bf08db6a, server=c48e4b8eb196,44683,1733750968041}] 2024-12-09T13:31:21,405 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=31220c1b1827b53b6a58d3a59622a0f0, UNASSIGN because future has completed 2024-12-09T13:31:21,405 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45049 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=c48e4b8eb196,44683,1733750968041, table=testExportFileSystemStateWithSplitRegion, region=31220c1b1827b53b6a58d3a59622a0f0. It is likely that another thread is currently holding the lock. 
To avoid deadlock, skip execution for now. 2024-12-09T13:31:21,406 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:31:21,406 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=36, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure 31220c1b1827b53b6a58d3a59622a0f0, server=c48e4b8eb196,44683,1733750968041}] 2024-12-09T13:31:21,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-09T13:31:21,558 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(122): Close 829531b2168b02ea6a230665bf08db6a 2024-12-09T13:31:21,558 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:31:21,558 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1722): Closing 829531b2168b02ea6a230665bf08db6a, disabling compactions & flushes 2024-12-09T13:31:21,558 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,5,1733750985348.829531b2168b02ea6a230665bf08db6a. 2024-12-09T13:31:21,558 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,5,1733750985348.829531b2168b02ea6a230665bf08db6a. 2024-12-09T13:31:21,558 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,5,1733750985348.829531b2168b02ea6a230665bf08db6a. after waiting 0 ms 2024-12-09T13:31:21,558 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,5,1733750985348.829531b2168b02ea6a230665bf08db6a. 2024-12-09T13:31:21,564 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/829531b2168b02ea6a230665bf08db6a/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-09T13:31:21,565 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:31:21,565 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,5,1733750985348.829531b2168b02ea6a230665bf08db6a. 
2024-12-09T13:31:21,565 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1676): Region close journal for 829531b2168b02ea6a230665bf08db6a: Waiting for close lock at 1733751081558Running coprocessor pre-close hooks at 1733751081558Disabling compacts and flushes for region at 1733751081558Disabling writes for close at 1733751081558Writing region close event to WAL at 1733751081559 (+1 ms)Running coprocessor post-close hooks at 1733751081565 (+6 ms)Closed at 1733751081565 2024-12-09T13:31:21,568 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(157): Closed 829531b2168b02ea6a230665bf08db6a 2024-12-09T13:31:21,568 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(122): Close 31220c1b1827b53b6a58d3a59622a0f0 2024-12-09T13:31:21,568 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:31:21,568 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1722): Closing 31220c1b1827b53b6a58d3a59622a0f0, disabling compactions & flushes 2024-12-09T13:31:21,568 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733750985348.31220c1b1827b53b6a58d3a59622a0f0. 2024-12-09T13:31:21,568 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=829531b2168b02ea6a230665bf08db6a, regionState=CLOSED 2024-12-09T13:31:21,568 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733750985348.31220c1b1827b53b6a58d3a59622a0f0. 2024-12-09T13:31:21,568 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733750985348.31220c1b1827b53b6a58d3a59622a0f0. after waiting 0 ms 2024-12-09T13:31:21,568 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733750985348.31220c1b1827b53b6a58d3a59622a0f0. 
2024-12-09T13:31:21,570 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 829531b2168b02ea6a230665bf08db6a, server=c48e4b8eb196,44683,1733750968041 because future has completed 2024-12-09T13:31:21,576 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=34 2024-12-09T13:31:21,576 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=34, state=SUCCESS, hasLock=false; CloseRegionProcedure 829531b2168b02ea6a230665bf08db6a, server=c48e4b8eb196,44683,1733750968041 in 171 msec 2024-12-09T13:31:21,576 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/31220c1b1827b53b6a58d3a59622a0f0/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-09T13:31:21,577 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:31:21,577 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733750985348.31220c1b1827b53b6a58d3a59622a0f0. 2024-12-09T13:31:21,577 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1676): Region close journal for 31220c1b1827b53b6a58d3a59622a0f0: Waiting for close lock at 1733751081568Running coprocessor pre-close hooks at 1733751081568Disabling compacts and flushes for region at 1733751081568Disabling writes for close at 1733751081568Writing region close event to WAL at 1733751081569 (+1 ms)Running coprocessor post-close hooks at 1733751081577 (+8 ms)Closed at 1733751081577 2024-12-09T13:31:21,578 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=829531b2168b02ea6a230665bf08db6a, UNASSIGN in 183 msec 2024-12-09T13:31:21,579 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(157): Closed 31220c1b1827b53b6a58d3a59622a0f0 2024-12-09T13:31:21,580 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=31220c1b1827b53b6a58d3a59622a0f0, regionState=CLOSED 2024-12-09T13:31:21,582 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=36, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure 31220c1b1827b53b6a58d3a59622a0f0, server=c48e4b8eb196,44683,1733750968041 because future has completed 2024-12-09T13:31:21,585 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=36, resume processing ppid=33 2024-12-09T13:31:21,585 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, ppid=33, state=SUCCESS, hasLock=false; CloseRegionProcedure 31220c1b1827b53b6a58d3a59622a0f0, server=c48e4b8eb196,44683,1733750968041 in 177 msec 2024-12-09T13:31:21,588 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=33, resume processing ppid=32 
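With the CloseRegionProcedure and UNASSIGN subprocedures finished above, the parent DisableTableProcedure completes and the client issues the delete (following records), which in turn archives the region directories via HFileArchiver. From the client side this teardown amounts to two Admin calls, roughly:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // DisableTableProcedure: close all regions, mark the table DISABLED in hbase:meta.
      if (!admin.isTableDisabled(table)) {
        admin.disableTable(table);
      }
      // DeleteTableProcedure: archive the region directories and remove the meta rows.
      admin.deleteTable(table);
    }
  }
}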
2024-12-09T13:31:21,588 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=31220c1b1827b53b6a58d3a59622a0f0, UNASSIGN in 192 msec 2024-12-09T13:31:21,592 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=32, resume processing ppid=31 2024-12-09T13:31:21,592 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=31, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion in 202 msec 2024-12-09T13:31:21,594 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751081594"}]},"ts":"1733751081594"} 2024-12-09T13:31:21,596 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-09T13:31:21,596 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-09T13:31:21,599 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion in 222 msec 2024-12-09T13:31:21,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-09T13:31:21,703 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-09T13:31:21,706 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,712 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] access.PermissionStorage(261): Removing permissions of removed table testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,713 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=37, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,717 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,721 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/7a88374f70350ec93398ce71231ae080 2024-12-09T13:31:21,721 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/31220c1b1827b53b6a58d3a59622a0f0 2024-12-09T13:31:21,721 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/829531b2168b02ea6a230665bf08db6a 2024-12-09T13:31:21,724 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/31220c1b1827b53b6a58d3a59622a0f0/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/31220c1b1827b53b6a58d3a59622a0f0/recovered.edits] 2024-12-09T13:31:21,724 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/829531b2168b02ea6a230665bf08db6a/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/829531b2168b02ea6a230665bf08db6a/recovered.edits] 2024-12-09T13:31:21,724 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/7a88374f70350ec93398ce71231ae080/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/7a88374f70350ec93398ce71231ae080/recovered.edits] 2024-12-09T13:31:21,732 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/31220c1b1827b53b6a58d3a59622a0f0/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_.7a88374f70350ec93398ce71231ae080 to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testExportFileSystemStateWithSplitRegion/31220c1b1827b53b6a58d3a59622a0f0/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_.7a88374f70350ec93398ce71231ae080 2024-12-09T13:31:21,732 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/829531b2168b02ea6a230665bf08db6a/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_.7a88374f70350ec93398ce71231ae080 to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testExportFileSystemStateWithSplitRegion/829531b2168b02ea6a230665bf08db6a/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_.7a88374f70350ec93398ce71231ae080 2024-12-09T13:31:21,732 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/7a88374f70350ec93398ce71231ae080/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_ to 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testExportFileSystemStateWithSplitRegion/7a88374f70350ec93398ce71231ae080/cf/297b6e3d5cf24e119b169d5abb242636_SeqId_4_ 2024-12-09T13:31:21,735 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/31220c1b1827b53b6a58d3a59622a0f0/recovered.edits/10.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testExportFileSystemStateWithSplitRegion/31220c1b1827b53b6a58d3a59622a0f0/recovered.edits/10.seqid 2024-12-09T13:31:21,735 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/829531b2168b02ea6a230665bf08db6a/recovered.edits/10.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testExportFileSystemStateWithSplitRegion/829531b2168b02ea6a230665bf08db6a/recovered.edits/10.seqid 2024-12-09T13:31:21,736 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/7a88374f70350ec93398ce71231ae080/recovered.edits/6.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testExportFileSystemStateWithSplitRegion/7a88374f70350ec93398ce71231ae080/recovered.edits/6.seqid 2024-12-09T13:31:21,736 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/31220c1b1827b53b6a58d3a59622a0f0 2024-12-09T13:31:21,736 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/829531b2168b02ea6a230665bf08db6a 2024-12-09T13:31:21,736 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportFileSystemStateWithSplitRegion/7a88374f70350ec93398ce71231ae080 2024-12-09T13:31:21,736 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testExportFileSystemStateWithSplitRegion regions 2024-12-09T13:31:21,739 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=37, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,743 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39211 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-09T13:31:21,750 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 3 rows of testExportFileSystemStateWithSplitRegion from hbase:meta 2024-12-09T13:31:21,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, 
path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,761 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-09T13:31:21,761 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-09T13:31:21,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,772 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-09T13:31:21,772 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-09T13:31:21,775 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testExportFileSystemStateWithSplitRegion' descriptor. 2024-12-09T13:31:21,777 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=37, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,777 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testExportFileSystemStateWithSplitRegion' from region states. 
2024-12-09T13:31:21,778 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751081777"}]},"ts":"9223372036854775807"} 2024-12-09T13:31:21,778 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733750985348.31220c1b1827b53b6a58d3a59622a0f0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751081777"}]},"ts":"9223372036854775807"} 2024-12-09T13:31:21,778 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,5,1733750985348.829531b2168b02ea6a230665bf08db6a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751081777"}]},"ts":"9223372036854775807"} 2024-12-09T13:31:21,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:31:21,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:31:21,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:31:21,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:31:21,783 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 3 regions from META 2024-12-09T13:31:21,784 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 
\x01 \x02 \x03 \x04 2024-12-09T13:31:21,784 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:31:21,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-09T13:31:21,784 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:31:21,783 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 7a88374f70350ec93398ce71231ae080, NAME => 'testExportFileSystemStateWithSplitRegion,,1733750978990.7a88374f70350ec93398ce71231ae080.', STARTKEY => '', ENDKEY => ''}, {ENCODED => 31220c1b1827b53b6a58d3a59622a0f0, NAME => 'testExportFileSystemStateWithSplitRegion,,1733750985348.31220c1b1827b53b6a58d3a59622a0f0.', STARTKEY => '', ENDKEY => '5'}, {ENCODED => 829531b2168b02ea6a230665bf08db6a, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733750985348.829531b2168b02ea6a230665bf08db6a.', STARTKEY => '5', ENDKEY => ''}] 2024-12-09T13:31:21,784 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testExportFileSystemStateWithSplitRegion' as deleted. 2024-12-09T13:31:21,784 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:31:21,784 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733751081784"}]},"ts":"9223372036854775807"} 2024-12-09T13:31:21,788 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testExportFileSystemStateWithSplitRegion state from META 2024-12-09T13:31:21,789 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=37, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,792 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion in 82 msec 2024-12-09T13:31:21,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-09T13:31:21,894 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,895 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-09T13:31:21,895 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,896 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=38, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:21,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-09T13:31:21,900 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751081900"}]},"ts":"1733751081900"} 2024-12-09T13:31:21,903 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-12-09T13:31:21,903 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-09T13:31:21,905 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion}] 2024-12-09T13:31:21,907 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=47e98b76f711545b77b78e05577538d0, UNASSIGN}, {pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1d30f226bd6406aad18df926a90eb66c, UNASSIGN}] 2024-12-09T13:31:21,908 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1d30f226bd6406aad18df926a90eb66c, UNASSIGN 2024-12-09T13:31:21,908 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=47e98b76f711545b77b78e05577538d0, UNASSIGN 2024-12-09T13:31:21,909 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=1d30f226bd6406aad18df926a90eb66c, regionState=CLOSING, regionLocation=c48e4b8eb196,36493,1733750968163 2024-12-09T13:31:21,909 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=47e98b76f711545b77b78e05577538d0, regionState=CLOSING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:31:21,911 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1d30f226bd6406aad18df926a90eb66c, UNASSIGN because future has completed 2024-12-09T13:31:21,912 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:31:21,912 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE, 
hasLock=false; CloseRegionProcedure 1d30f226bd6406aad18df926a90eb66c, server=c48e4b8eb196,36493,1733750968163}] 2024-12-09T13:31:21,912 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=47e98b76f711545b77b78e05577538d0, UNASSIGN because future has completed 2024-12-09T13:31:21,912 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45049 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=c48e4b8eb196,39211,1733750968235, table=testtb-testExportFileSystemStateWithSplitRegion, region=47e98b76f711545b77b78e05577538d0. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-09T13:31:21,913 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:31:21,913 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=43, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 47e98b76f711545b77b78e05577538d0, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:31:22,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-09T13:31:22,065 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(122): Close 1d30f226bd6406aad18df926a90eb66c 2024-12-09T13:31:22,066 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:31:22,066 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1722): Closing 1d30f226bd6406aad18df926a90eb66c, disabling compactions & flushes 2024-12-09T13:31:22,066 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c. 2024-12-09T13:31:22,066 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c. 2024-12-09T13:31:22,066 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c. after waiting 0 ms 2024-12-09T13:31:22,066 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c. 
2024-12-09T13:31:22,067 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(122): Close 47e98b76f711545b77b78e05577538d0 2024-12-09T13:31:22,067 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:31:22,067 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1722): Closing 47e98b76f711545b77b78e05577538d0, disabling compactions & flushes 2024-12-09T13:31:22,067 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0. 2024-12-09T13:31:22,067 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0. 2024-12-09T13:31:22,067 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0. after waiting 0 ms 2024-12-09T13:31:22,067 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0. 2024-12-09T13:31:22,073 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/47e98b76f711545b77b78e05577538d0/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T13:31:22,073 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/1d30f226bd6406aad18df926a90eb66c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T13:31:22,074 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:31:22,074 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0. 
2024-12-09T13:31:22,074 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1676): Region close journal for 47e98b76f711545b77b78e05577538d0: Waiting for close lock at 1733751082067Running coprocessor pre-close hooks at 1733751082067Disabling compacts and flushes for region at 1733751082067Disabling writes for close at 1733751082067Writing region close event to WAL at 1733751082068 (+1 ms)Running coprocessor post-close hooks at 1733751082074 (+6 ms)Closed at 1733751082074 2024-12-09T13:31:22,076 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(157): Closed 47e98b76f711545b77b78e05577538d0 2024-12-09T13:31:22,077 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=47e98b76f711545b77b78e05577538d0, regionState=CLOSED 2024-12-09T13:31:22,078 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:31:22,078 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c. 2024-12-09T13:31:22,078 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1676): Region close journal for 1d30f226bd6406aad18df926a90eb66c: Waiting for close lock at 1733751082066Running coprocessor pre-close hooks at 1733751082066Disabling compacts and flushes for region at 1733751082066Disabling writes for close at 1733751082066Writing region close event to WAL at 1733751082067 (+1 ms)Running coprocessor post-close hooks at 1733751082078 (+11 ms)Closed at 1733751082078 2024-12-09T13:31:22,080 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(157): Closed 1d30f226bd6406aad18df926a90eb66c 2024-12-09T13:31:22,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=43, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 47e98b76f711545b77b78e05577538d0, server=c48e4b8eb196,39211,1733750968235 because future has completed 2024-12-09T13:31:22,082 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=1d30f226bd6406aad18df926a90eb66c, regionState=CLOSED 2024-12-09T13:31:22,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=42, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1d30f226bd6406aad18df926a90eb66c, server=c48e4b8eb196,36493,1733750968163 because future has completed 2024-12-09T13:31:22,084 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=43, resume processing ppid=40 2024-12-09T13:31:22,085 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=40, state=SUCCESS, hasLock=false; CloseRegionProcedure 47e98b76f711545b77b78e05577538d0, server=c48e4b8eb196,39211,1733750968235 in 169 msec 2024-12-09T13:31:22,088 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=47e98b76f711545b77b78e05577538d0, UNASSIGN in 
177 msec 2024-12-09T13:31:22,089 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=41 2024-12-09T13:31:22,089 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=41, state=SUCCESS, hasLock=false; CloseRegionProcedure 1d30f226bd6406aad18df926a90eb66c, server=c48e4b8eb196,36493,1733750968163 in 175 msec 2024-12-09T13:31:22,091 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=41, resume processing ppid=39 2024-12-09T13:31:22,091 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1d30f226bd6406aad18df926a90eb66c, UNASSIGN in 182 msec 2024-12-09T13:31:22,100 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=38 2024-12-09T13:31:22,100 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 187 msec 2024-12-09T13:31:22,102 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751082102"}]},"ts":"1733751082102"} 2024-12-09T13:31:22,104 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-09T13:31:22,104 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-09T13:31:22,107 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 210 msec 2024-12-09T13:31:22,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-09T13:31:22,214 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-09T13:31:22,216 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:22,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:22,221 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:22,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:22,223 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=44, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure 
table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:22,226 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:22,228 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/47e98b76f711545b77b78e05577538d0 2024-12-09T13:31:22,228 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/1d30f226bd6406aad18df926a90eb66c 2024-12-09T13:31:22,230 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/1d30f226bd6406aad18df926a90eb66c/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/1d30f226bd6406aad18df926a90eb66c/recovered.edits] 2024-12-09T13:31:22,230 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/47e98b76f711545b77b78e05577538d0/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/47e98b76f711545b77b78e05577538d0/recovered.edits] 2024-12-09T13:31:22,234 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/1d30f226bd6406aad18df926a90eb66c/cf/561f9d8a344640358eeb707acec3031d to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/1d30f226bd6406aad18df926a90eb66c/cf/561f9d8a344640358eeb707acec3031d 2024-12-09T13:31:22,234 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/47e98b76f711545b77b78e05577538d0/cf/29af96df0ef3442fa669c7a19acff786 to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/47e98b76f711545b77b78e05577538d0/cf/29af96df0ef3442fa669c7a19acff786 2024-12-09T13:31:22,237 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/1d30f226bd6406aad18df926a90eb66c/recovered.edits/9.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/1d30f226bd6406aad18df926a90eb66c/recovered.edits/9.seqid 2024-12-09T13:31:22,237 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/47e98b76f711545b77b78e05577538d0/recovered.edits/9.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/47e98b76f711545b77b78e05577538d0/recovered.edits/9.seqid 2024-12-09T13:31:22,237 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/1d30f226bd6406aad18df926a90eb66c 2024-12-09T13:31:22,237 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSplitRegion/47e98b76f711545b77b78e05577538d0 2024-12-09T13:31:22,237 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSplitRegion regions 2024-12-09T13:31:22,240 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=44, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:22,242 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSplitRegion from hbase:meta 2024-12-09T13:31:22,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:22,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:22,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:22,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:22,269 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-09T13:31:22,269 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-09T13:31:22,269 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-09T13:31:22,270 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSplitRegion' descriptor. 
2024-12-09T13:31:22,272 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=44, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:22,272 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSplitRegion' from region states. 2024-12-09T13:31:22,272 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751082272"}]},"ts":"9223372036854775807"} 2024-12-09T13:31:22,273 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751082272"}]},"ts":"9223372036854775807"} 2024-12-09T13:31:22,275 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-09T13:31:22,275 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 47e98b76f711545b77b78e05577538d0, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733750976662.47e98b76f711545b77b78e05577538d0.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 1d30f226bd6406aad18df926a90eb66c, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733750976662.1d30f226bd6406aad18df926a90eb66c.', STARTKEY => '1', ENDKEY => ''}] 2024-12-09T13:31:22,275 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSplitRegion' as deleted. 
2024-12-09T13:31:22,276 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733751082275"}]},"ts":"9223372036854775807"} 2024-12-09T13:31:22,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:22,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:22,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:31:22,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:31:22,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:22,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:31:22,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:31:22,278 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data null 2024-12-09T13:31:22,278 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-09T13:31:22,279 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSplitRegion state from META 2024-12-09T13:31:22,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-09T13:31:22,280 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=44, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:22,282 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 64 msec 2024-12-09T13:31:22,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-09T13:31:22,384 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] 
client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:22,384 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-09T13:31:22,408 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-09T13:31:22,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:22,414 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-09T13:31:22,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:22,418 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-09T13:31:22,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:22,449 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=766 (was 719) Potentially hanging thread: RSProcedureDispatcher-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38821 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/c48e4b8eb196:0.Chore.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1335861966_1 at /127.0.0.1:37050 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (294847031) connection to localhost/127.0.0.1:38821 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RSProcedureDispatcher-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 124040) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/c48e4b8eb196:0.Chore.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:44870 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:36076 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1372 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1335861966_1 at /127.0.0.1:60668 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:38208 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=807 (was 778) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=376 (was 343) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=567 (was 5920) 2024-12-09T13:31:22,450 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=766 is superior to 500 2024-12-09T13:31:22,467 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=766, OpenFileDescriptor=807, MaxFileDescriptor=1048576, SystemLoadAverage=376, ProcessCount=17, AvailableMemoryMB=565 2024-12-09T13:31:22,467 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=766 is superior to 500 2024-12-09T13:31:22,469 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T13:31:22,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-12-09T13:31:22,472 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T13:31:22,472 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:31:22,472 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 45 2024-12-09T13:31:22,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-09T13:31:22,474 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T13:31:22,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741907_1083 (size=406) 2024-12-09T13:31:22,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741907_1083 (size=406) 2024-12-09T13:31:22,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741907_1083 (size=406) 2024-12-09T13:31:22,484 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 16e0b4fe3cecfaa132154bfd1666f6ee, NAME => 'testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', 
METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:31:22,484 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b53a8ca793dfdf20a84d92e1adfa3e8d, NAME => 'testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:31:22,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741909_1085 (size=67) 2024-12-09T13:31:22,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741909_1085 (size=67) 2024-12-09T13:31:22,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741909_1085 (size=67) 2024-12-09T13:31:22,496 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:31:22,496 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing b53a8ca793dfdf20a84d92e1adfa3e8d, disabling compactions & flushes 2024-12-09T13:31:22,496 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d. 2024-12-09T13:31:22,496 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d. 2024-12-09T13:31:22,496 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d. after waiting 0 ms 2024-12-09T13:31:22,496 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d. 2024-12-09T13:31:22,496 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d. 
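The CreateTableProcedure above (pid=45) corresponds to a client-side create of 'testtb-testExportWithTargetName' with a single 'cf' family (VERSIONS => '1') and the split key '1' that produces the two regions being initialized here. A minimal sketch of an equivalent request through the HBase Java Admin API; the class name and Configuration setup are illustrative, not taken from the test source:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateExportTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportWithTargetName");
          // Single column family 'cf' with max one version, as in the descriptor logged above.
          TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .build())
              .build();
          // One split key '1' yields the two regions seen in the log: ['' .. '1') and ['1' .. '').
          admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }

The blocking createTable call returns once the procedure above reaches SUCCESS, which is what the repeated "Checking to see if procedure is done pid=45" lines reflect on the master side.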
2024-12-09T13:31:22,497 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for b53a8ca793dfdf20a84d92e1adfa3e8d: Waiting for close lock at 1733751082496Disabling compacts and flushes for region at 1733751082496Disabling writes for close at 1733751082496Writing region close event to WAL at 1733751082496Closed at 1733751082496 2024-12-09T13:31:22,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741908_1084 (size=67) 2024-12-09T13:31:22,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741908_1084 (size=67) 2024-12-09T13:31:22,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741908_1084 (size=67) 2024-12-09T13:31:22,507 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:31:22,507 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing 16e0b4fe3cecfaa132154bfd1666f6ee, disabling compactions & flushes 2024-12-09T13:31:22,507 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee. 2024-12-09T13:31:22,507 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee. 2024-12-09T13:31:22,507 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee. after waiting 0 ms 2024-12-09T13:31:22,507 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee. 2024-12-09T13:31:22,507 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee. 
2024-12-09T13:31:22,508 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for 16e0b4fe3cecfaa132154bfd1666f6ee: Waiting for close lock at 1733751082507Disabling compacts and flushes for region at 1733751082507Disabling writes for close at 1733751082507Writing region close event to WAL at 1733751082507Closed at 1733751082507 2024-12-09T13:31:22,509 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T13:31:22,509 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733751082509"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733751082509"}]},"ts":"1733751082509"} 2024-12-09T13:31:22,509 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733751082509"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733751082509"}]},"ts":"1733751082509"} 2024-12-09T13:31:22,511 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-09T13:31:22,512 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T13:31:22,513 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751082512"}]},"ts":"1733751082512"} 2024-12-09T13:31:22,514 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-12-09T13:31:22,515 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {c48e4b8eb196=0} racks are {/default-rack=0} 2024-12-09T13:31:22,516 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T13:31:22,516 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T13:31:22,516 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T13:31:22,516 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T13:31:22,516 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T13:31:22,516 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T13:31:22,516 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T13:31:22,516 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T13:31:22,516 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T13:31:22,516 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T13:31:22,516 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b53a8ca793dfdf20a84d92e1adfa3e8d, ASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=16e0b4fe3cecfaa132154bfd1666f6ee, ASSIGN}] 2024-12-09T13:31:22,518 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=16e0b4fe3cecfaa132154bfd1666f6ee, ASSIGN 2024-12-09T13:31:22,518 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b53a8ca793dfdf20a84d92e1adfa3e8d, ASSIGN 2024-12-09T13:31:22,521 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b53a8ca793dfdf20a84d92e1adfa3e8d, ASSIGN; state=OFFLINE, location=c48e4b8eb196,36493,1733750968163; forceNewPlan=false, retain=false 2024-12-09T13:31:22,521 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=16e0b4fe3cecfaa132154bfd1666f6ee, ASSIGN; state=OFFLINE, location=c48e4b8eb196,39211,1733750968235; forceNewPlan=false, retain=false 2024-12-09T13:31:22,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-09T13:31:22,672 INFO [c48e4b8eb196:45049 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-09T13:31:22,672 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=16e0b4fe3cecfaa132154bfd1666f6ee, regionState=OPENING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:31:22,673 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=b53a8ca793dfdf20a84d92e1adfa3e8d, regionState=OPENING, regionLocation=c48e4b8eb196,36493,1733750968163 2024-12-09T13:31:22,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=16e0b4fe3cecfaa132154bfd1666f6ee, ASSIGN because future has completed 2024-12-09T13:31:22,676 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure 16e0b4fe3cecfaa132154bfd1666f6ee, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:31:22,678 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b53a8ca793dfdf20a84d92e1adfa3e8d, ASSIGN because future has completed 2024-12-09T13:31:22,678 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure b53a8ca793dfdf20a84d92e1adfa3e8d, server=c48e4b8eb196,36493,1733750968163}] 2024-12-09T13:31:22,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-09T13:31:22,836 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee. 2024-12-09T13:31:22,836 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7752): Opening region: {ENCODED => 16e0b4fe3cecfaa132154bfd1666f6ee, NAME => 'testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T13:31:22,837 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee. service=AccessControlService 2024-12-09T13:31:22,837 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T13:31:22,837 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 16e0b4fe3cecfaa132154bfd1666f6ee 2024-12-09T13:31:22,838 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:31:22,838 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7794): checking encryption for 16e0b4fe3cecfaa132154bfd1666f6ee 2024-12-09T13:31:22,838 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7797): checking classloading for 16e0b4fe3cecfaa132154bfd1666f6ee 2024-12-09T13:31:22,840 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d. 2024-12-09T13:31:22,840 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7752): Opening region: {ENCODED => b53a8ca793dfdf20a84d92e1adfa3e8d, NAME => 'testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T13:31:22,840 INFO [StoreOpener-16e0b4fe3cecfaa132154bfd1666f6ee-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 16e0b4fe3cecfaa132154bfd1666f6ee 2024-12-09T13:31:22,840 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d. service=AccessControlService 2024-12-09T13:31:22,841 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
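The "System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded" lines come from the security coprocessor that the secure test cluster installs on each region. A hedged sketch of the usual site configuration that enables it, expressed programmatically for brevity (real deployments set the same keys in hbase-site.xml; the exact keys used by this test cluster are an assumption):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class AccessControlConfigSketch {
      public static Configuration secureConf() {
        Configuration conf = HBaseConfiguration.create();
        // Turn on authorization and register the AccessController on master,
        // region servers, and regions -- the coprocessor the log shows being loaded.
        conf.setBoolean("hbase.security.authorization", true);
        conf.set("hbase.coprocessor.master.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.region.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.regionserver.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        return conf;
      }
    }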
2024-12-09T13:31:22,841 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName b53a8ca793dfdf20a84d92e1adfa3e8d 2024-12-09T13:31:22,841 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:31:22,841 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7794): checking encryption for b53a8ca793dfdf20a84d92e1adfa3e8d 2024-12-09T13:31:22,841 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7797): checking classloading for b53a8ca793dfdf20a84d92e1adfa3e8d 2024-12-09T13:31:22,842 INFO [StoreOpener-b53a8ca793dfdf20a84d92e1adfa3e8d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b53a8ca793dfdf20a84d92e1adfa3e8d 2024-12-09T13:31:22,843 INFO [StoreOpener-16e0b4fe3cecfaa132154bfd1666f6ee-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 16e0b4fe3cecfaa132154bfd1666f6ee columnFamilyName cf 2024-12-09T13:31:22,843 DEBUG [StoreOpener-16e0b4fe3cecfaa132154bfd1666f6ee-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:31:22,843 INFO [StoreOpener-16e0b4fe3cecfaa132154bfd1666f6ee-1 {}] regionserver.HStore(327): Store=16e0b4fe3cecfaa132154bfd1666f6ee/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:31:22,844 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1038): replaying wal for 16e0b4fe3cecfaa132154bfd1666f6ee 2024-12-09T13:31:22,844 INFO [StoreOpener-b53a8ca793dfdf20a84d92e1adfa3e8d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction 
window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b53a8ca793dfdf20a84d92e1adfa3e8d columnFamilyName cf 2024-12-09T13:31:22,844 DEBUG [StoreOpener-b53a8ca793dfdf20a84d92e1adfa3e8d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:31:22,844 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/16e0b4fe3cecfaa132154bfd1666f6ee 2024-12-09T13:31:22,845 INFO [StoreOpener-b53a8ca793dfdf20a84d92e1adfa3e8d-1 {}] regionserver.HStore(327): Store=b53a8ca793dfdf20a84d92e1adfa3e8d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:31:22,845 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1038): replaying wal for b53a8ca793dfdf20a84d92e1adfa3e8d 2024-12-09T13:31:22,845 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/16e0b4fe3cecfaa132154bfd1666f6ee 2024-12-09T13:31:22,845 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1048): stopping wal replay for 16e0b4fe3cecfaa132154bfd1666f6ee 2024-12-09T13:31:22,845 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1060): Cleaning up temporary data for 16e0b4fe3cecfaa132154bfd1666f6ee 2024-12-09T13:31:22,845 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/b53a8ca793dfdf20a84d92e1adfa3e8d 2024-12-09T13:31:22,846 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/b53a8ca793dfdf20a84d92e1adfa3e8d 2024-12-09T13:31:22,846 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1048): stopping wal replay for b53a8ca793dfdf20a84d92e1adfa3e8d 2024-12-09T13:31:22,846 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1060): Cleaning up temporary data for b53a8ca793dfdf20a84d92e1adfa3e8d 2024-12-09T13:31:22,847 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1093): writing seq id for 16e0b4fe3cecfaa132154bfd1666f6ee 2024-12-09T13:31:22,848 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1093): writing seq id for b53a8ca793dfdf20a84d92e1adfa3e8d 2024-12-09T13:31:22,849 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 
{event_type=M_RS_OPEN_REGION, pid=48}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/16e0b4fe3cecfaa132154bfd1666f6ee/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:31:22,849 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1114): Opened 16e0b4fe3cecfaa132154bfd1666f6ee; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69499064, jitterRate=0.03561675548553467}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:31:22,849 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 16e0b4fe3cecfaa132154bfd1666f6ee 2024-12-09T13:31:22,850 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/b53a8ca793dfdf20a84d92e1adfa3e8d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:31:22,850 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1006): Region open journal for 16e0b4fe3cecfaa132154bfd1666f6ee: Running coprocessor pre-open hook at 1733751082838Writing region info on filesystem at 1733751082838Initializing all the Stores at 1733751082840 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751082840Cleaning up temporary data from old regions at 1733751082845 (+5 ms)Running coprocessor post-open hooks at 1733751082849 (+4 ms)Region opened successfully at 1733751082850 (+1 ms) 2024-12-09T13:31:22,850 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1114): Opened b53a8ca793dfdf20a84d92e1adfa3e8d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59867859, jitterRate=-0.10789938271045685}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:31:22,851 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b53a8ca793dfdf20a84d92e1adfa3e8d 2024-12-09T13:31:22,851 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee., pid=48, masterSystemTime=1733751082829 2024-12-09T13:31:22,851 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1006): Region open journal for b53a8ca793dfdf20a84d92e1adfa3e8d: Running coprocessor pre-open hook at 1733751082841Writing region info on filesystem at 1733751082841Initializing all the Stores at 1733751082842 (+1 ms)Instantiating store for column family {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751082842Cleaning up temporary data from old regions at 1733751082846 (+4 ms)Running coprocessor post-open hooks at 1733751082851 (+5 ms)Region opened successfully at 1733751082851 2024-12-09T13:31:22,851 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d., pid=49, masterSystemTime=1733751082835 2024-12-09T13:31:22,852 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee. 2024-12-09T13:31:22,852 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee. 2024-12-09T13:31:22,853 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=16e0b4fe3cecfaa132154bfd1666f6ee, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:31:22,853 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d. 2024-12-09T13:31:22,853 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d. 
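At this point both regions (pid=48 and pid=49) are open on their assigned region servers and the post-open deploy tasks have finished. A client can confirm the same state roughly as follows; this is illustrative only, the test itself relies on its HBaseTestingUtil helpers:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class CheckRegionsOnline {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportWithTargetName");
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin();
             RegionLocator locator = connection.getRegionLocator(table)) {
          System.out.println("available: " + admin.isTableAvailable(table));
          // Expect two locations, matching the two regions created with split key '1'.
          for (HRegionLocation location : locator.getAllRegionLocations()) {
            System.out.println(location.getRegion().getRegionNameAsString()
                + " -> " + location.getServerName());
          }
        }
      }
    }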
2024-12-09T13:31:22,854 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=b53a8ca793dfdf20a84d92e1adfa3e8d, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,36493,1733750968163 2024-12-09T13:31:22,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure 16e0b4fe3cecfaa132154bfd1666f6ee, server=c48e4b8eb196,39211,1733750968235 because future has completed 2024-12-09T13:31:22,856 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure b53a8ca793dfdf20a84d92e1adfa3e8d, server=c48e4b8eb196,36493,1733750968163 because future has completed 2024-12-09T13:31:22,857 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=47 2024-12-09T13:31:22,857 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=47, state=SUCCESS, hasLock=false; OpenRegionProcedure 16e0b4fe3cecfaa132154bfd1666f6ee, server=c48e4b8eb196,39211,1733750968235 in 180 msec 2024-12-09T13:31:22,858 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=46 2024-12-09T13:31:22,858 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=46, state=SUCCESS, hasLock=false; OpenRegionProcedure b53a8ca793dfdf20a84d92e1adfa3e8d, server=c48e4b8eb196,36493,1733750968163 in 178 msec 2024-12-09T13:31:22,859 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=16e0b4fe3cecfaa132154bfd1666f6ee, ASSIGN in 341 msec 2024-12-09T13:31:22,860 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=46, resume processing ppid=45 2024-12-09T13:31:22,860 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b53a8ca793dfdf20a84d92e1adfa3e8d, ASSIGN in 342 msec 2024-12-09T13:31:22,861 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T13:31:22,861 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751082861"}]},"ts":"1733751082861"} 2024-12-09T13:31:22,863 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-12-09T13:31:22,864 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T13:31:22,864 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-12-09T13:31:22,867 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 
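The PermissionStorage lines record the creator's ACL entry (jenkins: RWXCA) that the AccessController writes automatically for a newly created table. An explicit grant of the same table-level permissions could be issued through AccessControlClient; a sketch only, since the test does not call this directly and the entry above comes from the post-create hook:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantTableAcl {
      public static void main(String[] args) throws Throwable {
        TableName table = TableName.valueOf("testtb-testExportWithTargetName");
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          // READ, WRITE, EXEC, CREATE, ADMIN -- the actions printed as 'RWXCA' in the log.
          // Null family/qualifier means the grant applies to the whole table.
          AccessControlClient.grant(connection, table, "jenkins", null, null,
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
        }
      }
    }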
2024-12-09T13:31:22,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:31:22,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:31:22,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:31:22,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:31:22,928 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-09T13:31:22,928 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-09T13:31:22,928 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-09T13:31:22,929 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-09T13:31:22,934 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 462 msec 2024-12-09T13:31:23,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-09T13:31:23,105 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-09T13:31:23,105 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithTargetName get assigned. Timeout = 60000ms 2024-12-09T13:31:23,106 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:31:23,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithTargetName assigned to meta. Checking AM states. 2024-12-09T13:31:23,115 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:31:23,115 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithTargetName assigned. 
2024-12-09T13:31:23,115 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-09T13:31:23,119 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-09T13:31:23,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733751083119 (current time:1733751083119). 2024-12-09T13:31:23,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T13:31:23,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-09T13:31:23,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T13:31:23,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70cde376, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:23,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:31:23,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:31:23,121 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:31:23,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:31:23,122 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:31:23,122 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f54bb19, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:23,122 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:31:23,122 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:31:23,122 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:23,123 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.2:52162, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:31:23,124 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e36c184, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:23,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:31:23,126 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:31:23,126 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:31:23,127 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48972, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:31:23,128 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049. 2024-12-09T13:31:23,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:31:23,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:23,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:23,129 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T13:31:23,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4cf78a24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:23,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:31:23,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:31:23,131 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:31:23,131 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:31:23,131 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:31:23,131 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77264ce3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:23,131 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:31:23,132 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:31:23,132 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:23,132 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52186, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:31:23,133 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49b49278, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:23,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:31:23,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:31:23,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:31:23,136 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48976, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-09T13:31:23,139 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:31:23,141 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049. 2024-12-09T13:31:23,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:31:23,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:23,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:23,142 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:31:23,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-09T13:31:23,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T13:31:23,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-09T13:31:23,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-09T13:31:23,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-09T13:31:23,146 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T13:31:23,147 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T13:31:23,150 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T13:31:23,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741910_1086 (size=167) 2024-12-09T13:31:23,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741910_1086 (size=167) 2024-12-09T13:31:23,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741910_1086 (size=167) 2024-12-09T13:31:23,159 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T13:31:23,159 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b53a8ca793dfdf20a84d92e1adfa3e8d}, {pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 16e0b4fe3cecfaa132154bfd1666f6ee}] 2024-12-09T13:31:23,160 
INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b53a8ca793dfdf20a84d92e1adfa3e8d 2024-12-09T13:31:23,160 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 16e0b4fe3cecfaa132154bfd1666f6ee 2024-12-09T13:31:23,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-09T13:31:23,313 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39211 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=52 2024-12-09T13:31:23,314 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36493 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=51 2024-12-09T13:31:23,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee. 2024-12-09T13:31:23,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d. 2024-12-09T13:31:23,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.HRegion(2603): Flush status journal for b53a8ca793dfdf20a84d92e1adfa3e8d: 2024-12-09T13:31:23,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.HRegion(2603): Flush status journal for 16e0b4fe3cecfaa132154bfd1666f6ee: 2024-12-09T13:31:23,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d. for emptySnaptb0-testExportWithTargetName completed. 2024-12-09T13:31:23,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee. for emptySnaptb0-testExportWithTargetName completed. 2024-12-09T13:31:23,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-09T13:31:23,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-09T13:31:23,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:31:23,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:31:23,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T13:31:23,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T13:31:23,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741911_1087 (size=70) 2024-12-09T13:31:23,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741911_1087 (size=70) 2024-12-09T13:31:23,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741912_1088 (size=70) 2024-12-09T13:31:23,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741912_1088 (size=70) 2024-12-09T13:31:23,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741911_1087 (size=70) 2024-12-09T13:31:23,327 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee. 2024-12-09T13:31:23,327 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-09T13:31:23,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741912_1088 (size=70) 2024-12-09T13:31:23,327 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d. 
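The entries above show the master-side SnapshotProcedure (pid=50) for emptySnaptb0-testExportWithTargetName driving per-region SnapshotRegionProcedure tasks on the region servers, which store region-info and (empty) hfile reference lists. For orientation, a minimal sketch of the client call that triggers this flow, assuming a reachable cluster; the snapshot and table names are taken from the log, while the class name and configuration source are illustrative:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

// Client-side counterpart of the SnapshotProcedure logged above (pid=50).
public class TakeSnapshotExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot: each online region is flushed (a no-op for the
      // still-empty table here) and its hfiles are referenced in the manifest.
      admin.snapshot("emptySnaptb0-testExportWithTargetName",
          TableName.valueOf("testtb-testExportWithTargetName"),
          SnapshotType.FLUSH);
    }
  }
}

The blocking snapshot() call is what produces the repeated "Checking to see if procedure is done pid=50" entries: the client polls the master until the procedure finishes.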
2024-12-09T13:31:23,328 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=51 2024-12-09T13:31:23,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=52 2024-12-09T13:31:23,328 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 16e0b4fe3cecfaa132154bfd1666f6ee 2024-12-09T13:31:23,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=51 2024-12-09T13:31:23,328 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region b53a8ca793dfdf20a84d92e1adfa3e8d 2024-12-09T13:31:23,328 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 16e0b4fe3cecfaa132154bfd1666f6ee 2024-12-09T13:31:23,328 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b53a8ca793dfdf20a84d92e1adfa3e8d 2024-12-09T13:31:23,332 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 16e0b4fe3cecfaa132154bfd1666f6ee in 172 msec 2024-12-09T13:31:23,333 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=51, resume processing ppid=50 2024-12-09T13:31:23,333 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T13:31:23,333 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b53a8ca793dfdf20a84d92e1adfa3e8d in 172 msec 2024-12-09T13:31:23,334 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T13:31:23,335 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T13:31:23,335 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-12-09T13:31:23,336 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-12-09T13:31:23,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42277 is added to blk_1073741913_1089 (size=549) 2024-12-09T13:31:23,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741913_1089 (size=549) 2024-12-09T13:31:23,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741913_1089 (size=549) 2024-12-09T13:31:23,353 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T13:31:23,358 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T13:31:23,359 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-12-09T13:31:23,360 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T13:31:23,360 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-09T13:31:23,362 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 217 msec 2024-12-09T13:31:23,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-09T13:31:23,463 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-09T13:31:23,467 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='04793b36cf2d6f629712c617607041434', locateType=CURRENT is [region=testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d., hostname=c48e4b8eb196,36493,1733750968163, seqNum=2] 2024-12-09T13:31:23,468 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='131f6bcec5b2a9e9ddd13e6cd5ae7dab5', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee., hostname=c48e4b8eb196,39211,1733750968235, 
seqNum=2] 2024-12-09T13:31:23,469 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='294357fce4cf66d376e8ed7db9a9f3701', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:31:23,471 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='35955542ff8f614d39a660cfc67438808', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:31:23,472 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='5ce33eb99443470fe70032f7f1676bfaa', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:31:23,473 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='43ddbe3bee533a4ac69fd89329e872c22', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:31:23,475 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36493 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T13:31:23,478 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39211 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T13:31:23,481 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-09T13:31:23,484 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-12-09T13:31:23,484 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d. 
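The "writing data to region ... with WAL disabled" entries above come from client puts issued with the write-ahead log skipped; the preceding AsyncNonMetaRegionLocator entries are the client resolving which region server owns each row before routing the writes. A minimal sketch of such a write, assuming a reachable cluster; the table name and the cf/q column match the log, while the row key and value are illustrative:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of a client write that would produce the "with WAL disabled" warning above.
public class SkipWalPutExample {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportWithTargetName");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(tn)) {
      Put put = new Put(Bytes.toBytes("row-0001"));            // hypothetical row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),   // 'cf'/'q' as seen in the log
          Bytes.toBytes("value"));                             // illustrative value
      // Skip the write-ahead log: faster, but the data is lost if the region
      // server crashes before the memstore is flushed (hence the log warning).
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}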
2024-12-09T13:31:23,484 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:31:23,486 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-09T13:31:23,492 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-09T13:31:23,499 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-09T13:31:23,502 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-09T13:31:23,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733751083502 (current time:1733751083502). 2024-12-09T13:31:23,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T13:31:23,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-09T13:31:23,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T13:31:23,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5993741c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:23,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:31:23,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:31:23,504 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:31:23,504 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:31:23,505 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:31:23,505 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19e997ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=null 2024-12-09T13:31:23,505 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:31:23,505 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:31:23,505 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:23,506 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52208, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:31:23,507 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b19d961, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:23,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:31:23,508 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:31:23,508 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:31:23,509 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48992, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:31:23,510 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049. 
2024-12-09T13:31:23,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:31:23,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:23,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:23,510 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:31:23,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ecb00ec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:23,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:31:23,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:31:23,511 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:31:23,512 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:31:23,512 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:31:23,512 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b27be05, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:23,512 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:31:23,512 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:31:23,512 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:23,513 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52226, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:31:23,513 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48fc68d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:23,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:31:23,515 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:31:23,515 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:31:23,516 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49008, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:31:23,517 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:31:23,518 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049. 
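The lookup near the end of the entry above resolves which region of hbase:acl holds the permission row for the table being snapshotted. The same location information can be fetched explicitly through the synchronous RegionLocator API; a sketch, assuming a reachable cluster (the printed output format is illustrative):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

// Explicit region-location lookup, the synchronous counterpart of the
// AsyncNonMetaRegionLocator fetches seen in the log.
public class LocateRegionExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("hbase:acl"))) {
      // Same row that the log's acl lookup used: the table name being checked.
      HRegionLocation loc = locator.getRegionLocation(
          Bytes.toBytes("testtb-testExportWithTargetName"));
      System.out.println(loc.getRegion().getRegionNameAsString()
          + " @ " + loc.getServerName());
    }
  }
}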
2024-12-09T13:31:23,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:31:23,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:23,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:23,519 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:31:23,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-09T13:31:23,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
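The "Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA]" entry above is the stored table permission (Read, Write, eXecute, Create, Admin for user jenkins) that the snapshot validation copies into the snapshot description. A sketch of how such an entry is typically created, assuming the AccessController coprocessor is enabled; the exact AccessControlClient signature should be checked against the HBase version in use:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

// Grant full table permissions to a user, producing an hbase:acl entry
// like the "[jenkins: RWXCA]" one read above.
public class GrantExample {
  public static void main(String[] args) throws Throwable {  // AccessControlClient methods declare Throwable
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportWithTargetName"),
          "jenkins",        // user from the log
          null, null,       // whole table: no column family / qualifier restriction
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);   // R, W, X, C, A
    }
  }
}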
2024-12-09T13:31:23,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-09T13:31:23,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-09T13:31:23,521 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T13:31:23,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-09T13:31:23,522 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T13:31:23,524 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T13:31:23,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741914_1090 (size=162) 2024-12-09T13:31:23,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741914_1090 (size=162) 2024-12-09T13:31:23,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741914_1090 (size=162) 2024-12-09T13:31:23,532 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T13:31:23,532 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b53a8ca793dfdf20a84d92e1adfa3e8d}, {pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 16e0b4fe3cecfaa132154bfd1666f6ee}] 2024-12-09T13:31:23,533 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b53a8ca793dfdf20a84d92e1adfa3e8d 2024-12-09T13:31:23,533 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 16e0b4fe3cecfaa132154bfd1666f6ee 2024-12-09T13:31:23,633 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-09T13:31:23,685 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36493 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=54 2024-12-09T13:31:23,685 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39211 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=55 2024-12-09T13:31:23,685 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee. 2024-12-09T13:31:23,685 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d. 2024-12-09T13:31:23,685 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2902): Flushing b53a8ca793dfdf20a84d92e1adfa3e8d 1/1 column families, dataSize=65 B heapSize=400 B 2024-12-09T13:31:23,685 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2902): Flushing 16e0b4fe3cecfaa132154bfd1666f6ee 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-12-09T13:31:23,705 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/b53a8ca793dfdf20a84d92e1adfa3e8d/.tmp/cf/cf5ed4ba0a99436e8b116a9f7a39d936 is 69, key is 04793b36cf2d6f629712c617607041434/cf:q/1733751083475/Put/seqid=0 2024-12-09T13:31:23,706 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/16e0b4fe3cecfaa132154bfd1666f6ee/.tmp/cf/5b05a2df3799456f86d86975af6f3e15 is 71, key is 1625813e46a70e37dcffff86d7d03731/cf:q/1733751083478/Put/seqid=0 2024-12-09T13:31:23,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741916_1092 (size=8462) 2024-12-09T13:31:23,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741915_1091 (size=5149) 2024-12-09T13:31:23,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741916_1092 (size=8462) 2024-12-09T13:31:23,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741916_1092 (size=8462) 2024-12-09T13:31:23,720 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/b53a8ca793dfdf20a84d92e1adfa3e8d/.tmp/cf/cf5ed4ba0a99436e8b116a9f7a39d936 2024-12-09T13:31:23,720 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.19 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/16e0b4fe3cecfaa132154bfd1666f6ee/.tmp/cf/5b05a2df3799456f86d86975af6f3e15 2024-12-09T13:31:23,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741915_1091 (size=5149) 2024-12-09T13:31:23,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741915_1091 (size=5149) 2024-12-09T13:31:23,728 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/b53a8ca793dfdf20a84d92e1adfa3e8d/.tmp/cf/cf5ed4ba0a99436e8b116a9f7a39d936 as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/b53a8ca793dfdf20a84d92e1adfa3e8d/cf/cf5ed4ba0a99436e8b116a9f7a39d936 2024-12-09T13:31:23,730 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/16e0b4fe3cecfaa132154bfd1666f6ee/.tmp/cf/5b05a2df3799456f86d86975af6f3e15 as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/16e0b4fe3cecfaa132154bfd1666f6ee/cf/5b05a2df3799456f86d86975af6f3e15 2024-12-09T13:31:23,734 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/b53a8ca793dfdf20a84d92e1adfa3e8d/cf/cf5ed4ba0a99436e8b116a9f7a39d936, entries=1, sequenceid=6, filesize=5.0 K 2024-12-09T13:31:23,735 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(3140): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for b53a8ca793dfdf20a84d92e1adfa3e8d in 50ms, sequenceid=6, compaction requested=false 2024-12-09T13:31:23,736 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-09T13:31:23,736 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2603): Flush status journal for b53a8ca793dfdf20a84d92e1adfa3e8d: 2024-12-09T13:31:23,736 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d. for snaptb0-testExportWithTargetName completed. 2024-12-09T13:31:23,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-09T13:31:23,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:31:23,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/b53a8ca793dfdf20a84d92e1adfa3e8d/cf/cf5ed4ba0a99436e8b116a9f7a39d936] hfiles 2024-12-09T13:31:23,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/b53a8ca793dfdf20a84d92e1adfa3e8d/cf/cf5ed4ba0a99436e8b116a9f7a39d936 for snapshot=snaptb0-testExportWithTargetName 2024-12-09T13:31:23,738 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/16e0b4fe3cecfaa132154bfd1666f6ee/cf/5b05a2df3799456f86d86975af6f3e15, entries=49, sequenceid=6, filesize=8.3 K 2024-12-09T13:31:23,740 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(3140): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for 16e0b4fe3cecfaa132154bfd1666f6ee in 54ms, sequenceid=6, compaction requested=false 2024-12-09T13:31:23,740 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2603): Flush status journal for 16e0b4fe3cecfaa132154bfd1666f6ee: 2024-12-09T13:31:23,740 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee. for snaptb0-testExportWithTargetName completed. 2024-12-09T13:31:23,740 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-09T13:31:23,740 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:31:23,740 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/16e0b4fe3cecfaa132154bfd1666f6ee/cf/5b05a2df3799456f86d86975af6f3e15] hfiles 2024-12-09T13:31:23,740 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/16e0b4fe3cecfaa132154bfd1666f6ee/cf/5b05a2df3799456f86d86975af6f3e15 for snapshot=snaptb0-testExportWithTargetName 2024-12-09T13:31:23,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741917_1093 (size=109) 2024-12-09T13:31:23,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741917_1093 (size=109) 2024-12-09T13:31:23,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741918_1094 (size=109) 2024-12-09T13:31:23,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741918_1094 (size=109) 2024-12-09T13:31:23,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741917_1093 (size=109) 2024-12-09T13:31:23,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741918_1094 (size=109) 2024-12-09T13:31:23,767 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d. 
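For the data-bearing snapshot (snaptb0-testExportWithTargetName), the FLUSH-type procedure above first flushes each region's memstore into an hfile and then records a reference to that hfile in the snapshot manifest. The same flush can also be requested directly by a client; a minimal sketch, assuming a reachable cluster:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Explicitly flush all regions of the table; the FLUSH snapshot above performs
// the equivalent step per region before referencing the resulting hfiles.
public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("testtb-testExportWithTargetName"));
    }
  }
}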
2024-12-09T13:31:23,768 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-09T13:31:23,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=54 2024-12-09T13:31:23,768 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region b53a8ca793dfdf20a84d92e1adfa3e8d 2024-12-09T13:31:23,768 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b53a8ca793dfdf20a84d92e1adfa3e8d 2024-12-09T13:31:23,770 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b53a8ca793dfdf20a84d92e1adfa3e8d in 237 msec 2024-12-09T13:31:23,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-09T13:31:24,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-09T13:31:24,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee. 2024-12-09T13:31:24,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=55 2024-12-09T13:31:24,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=55 2024-12-09T13:31:24,168 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 16e0b4fe3cecfaa132154bfd1666f6ee 2024-12-09T13:31:24,169 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 16e0b4fe3cecfaa132154bfd1666f6ee 2024-12-09T13:31:24,171 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=55, resume processing ppid=53 2024-12-09T13:31:24,171 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T13:31:24,172 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 16e0b4fe3cecfaa132154bfd1666f6ee in 637 msec 2024-12-09T13:31:24,172 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T13:31:24,173 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, 
state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T13:31:24,173 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-12-09T13:31:24,174 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-12-09T13:31:24,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741919_1095 (size=627) 2024-12-09T13:31:24,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741919_1095 (size=627) 2024-12-09T13:31:24,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741919_1095 (size=627) 2024-12-09T13:31:24,186 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T13:31:24,192 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T13:31:24,193 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-09T13:31:24,194 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T13:31:24,194 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-09T13:31:24,195 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 674 msec 2024-12-09T13:31:24,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-09T13:31:24,663 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: 
default:testtb-testExportWithTargetName completed 2024-12-09T13:31:24,664 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751084664 2024-12-09T13:31:24,664 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:45819, tgtDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751084664, rawTgtDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751084664, srcFsUri=hdfs://localhost:45819, srcDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:31:24,710 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:45819, inputRoot=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:31:24,710 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751084664, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751084664/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-09T13:31:24,712 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-09T13:31:24,722 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751084664/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-09T13:31:24,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741920_1096 (size=162) 2024-12-09T13:31:24,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741920_1096 (size=162) 2024-12-09T13:31:24,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741920_1096 (size=162) 2024-12-09T13:31:24,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741921_1097 (size=627) 2024-12-09T13:31:24,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741921_1097 (size=627) 2024-12-09T13:31:24,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741921_1097 (size=627) 2024-12-09T13:31:24,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741922_1098 (size=154) 2024-12-09T13:31:24,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741922_1098 (size=154) 2024-12-09T13:31:24,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33239 is added to blk_1073741922_1098 (size=154) 2024-12-09T13:31:24,764 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:31:24,765 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:31:24,765 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:31:25,616 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop-1663782356275716003.jar 2024-12-09T13:31:25,617 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:31:25,617 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:31:25,672 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop-10905163118379099733.jar 2024-12-09T13:31:25,672 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:31:25,672 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:31:25,673 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:31:25,673 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:31:25,673 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:31:25,674 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:31:25,674 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-09T13:31:25,674 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-09T13:31:25,674 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-09T13:31:25,675 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-09T13:31:25,675 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-09T13:31:25,675 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-09T13:31:25,675 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-09T13:31:25,675 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-09T13:31:25,676 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-09T13:31:25,676 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-09T13:31:25,676 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-09T13:31:25,676 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:31:25,677 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:31:25,677 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:31:25,677 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:31:25,677 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:31:25,677 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:31:25,677 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:31:25,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741923_1099 (size=5175431) 2024-12-09T13:31:25,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741923_1099 (size=5175431) 2024-12-09T13:31:25,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741923_1099 (size=5175431) 2024-12-09T13:31:25,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741924_1100 (size=1877034) 2024-12-09T13:31:25,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741924_1100 (size=1877034) 2024-12-09T13:31:25,748 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741924_1100 (size=1877034) 2024-12-09T13:31:25,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741925_1101 (size=20406) 2024-12-09T13:31:25,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741925_1101 (size=20406) 2024-12-09T13:31:25,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741925_1101 (size=20406) 2024-12-09T13:31:25,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741926_1102 (size=24096) 2024-12-09T13:31:25,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741926_1102 (size=24096) 2024-12-09T13:31:25,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741926_1102 (size=24096) 2024-12-09T13:31:25,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741927_1103 (size=503880) 2024-12-09T13:31:25,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741927_1103 (size=503880) 2024-12-09T13:31:25,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741927_1103 (size=503880) 2024-12-09T13:31:25,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741928_1104 (size=4695811) 2024-12-09T13:31:25,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741928_1104 (size=4695811) 2024-12-09T13:31:25,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741928_1104 (size=4695811) 2024-12-09T13:31:25,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741929_1105 (size=111872) 2024-12-09T13:31:25,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741929_1105 (size=111872) 2024-12-09T13:31:25,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741929_1105 (size=111872) 2024-12-09T13:31:25,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741930_1106 (size=1597213) 2024-12-09T13:31:25,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741930_1106 (size=1597213) 2024-12-09T13:31:25,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741930_1106 (size=1597213) 2024-12-09T13:31:25,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741931_1107 (size=4188619) 2024-12-09T13:31:25,825 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741931_1107 (size=4188619) 2024-12-09T13:31:25,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741931_1107 (size=4188619) 2024-12-09T13:31:25,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741932_1108 (size=127628) 2024-12-09T13:31:25,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741932_1108 (size=127628) 2024-12-09T13:31:25,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741932_1108 (size=127628) 2024-12-09T13:31:25,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741933_1109 (size=29229) 2024-12-09T13:31:25,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741933_1109 (size=29229) 2024-12-09T13:31:25,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741933_1109 (size=29229) 2024-12-09T13:31:25,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741934_1110 (size=45609) 2024-12-09T13:31:25,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741934_1110 (size=45609) 2024-12-09T13:31:25,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741934_1110 (size=45609) 2024-12-09T13:31:25,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741935_1111 (size=8360360) 2024-12-09T13:31:25,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741935_1111 (size=8360360) 2024-12-09T13:31:25,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741935_1111 (size=8360360) 2024-12-09T13:31:25,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741936_1112 (size=1323991) 2024-12-09T13:31:25,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741936_1112 (size=1323991) 2024-12-09T13:31:25,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741936_1112 (size=1323991) 2024-12-09T13:31:25,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741937_1113 (size=232957) 2024-12-09T13:31:25,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741937_1113 (size=232957) 2024-12-09T13:31:25,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741937_1113 (size=232957) 2024-12-09T13:31:25,946 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741938_1114 (size=217634) 2024-12-09T13:31:25,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741938_1114 (size=217634) 2024-12-09T13:31:25,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741938_1114 (size=217634) 2024-12-09T13:31:25,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741939_1115 (size=903930) 2024-12-09T13:31:25,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741939_1115 (size=903930) 2024-12-09T13:31:25,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741939_1115 (size=903930) 2024-12-09T13:31:25,988 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_1/usercache/jenkins/appcache/application_1733750974572_0001/container_1733750974572_0001_01_000001/launch_container.sh] 2024-12-09T13:31:25,989 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_1/usercache/jenkins/appcache/application_1733750974572_0001/container_1733750974572_0001_01_000001/container_tokens] 2024-12-09T13:31:25,989 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_1/usercache/jenkins/appcache/application_1733750974572_0001/container_1733750974572_0001_01_000001/sysfs] 2024-12-09T13:31:25,994 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0001_000001 (auth:SIMPLE) from 127.0.0.1:52688 2024-12-09T13:31:26,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741940_1116 (size=77835) 2024-12-09T13:31:26,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741940_1116 (size=77835) 2024-12-09T13:31:26,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741940_1116 (size=77835) 2024-12-09T13:31:26,020 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
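The mapreduce.TableMapReduceUtil(972) lines above come from dependency-jar resolution before the export job is submitted; a minimal sketch of that step, assuming a plain MapReduce Job (the job name is a placeholder, not taken from this run):

// Sketch only: the usual TableMapReduceUtil dependency-jar step.
// The job name and configuration values here are illustrative assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-deps-sketch");
    // Resolves the jar that provides each required class (HConstants, ClientProtos, ...)
    // and adds it to the job's distributed cache, which is what produces the
    // "For class ..., using jar ..." DEBUG lines seen in this log.
    TableMapReduceUtil.addDependencyJars(job);
  }
}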
2024-12-09T13:31:26,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741941_1117 (size=1832290) 2024-12-09T13:31:26,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741941_1117 (size=1832290) 2024-12-09T13:31:26,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741941_1117 (size=1832290) 2024-12-09T13:31:26,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741942_1118 (size=30949) 2024-12-09T13:31:26,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741942_1118 (size=30949) 2024-12-09T13:31:26,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741942_1118 (size=30949) 2024-12-09T13:31:26,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741943_1119 (size=136454) 2024-12-09T13:31:26,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741943_1119 (size=136454) 2024-12-09T13:31:26,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741943_1119 (size=136454) 2024-12-09T13:31:26,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741944_1120 (size=6425017) 2024-12-09T13:31:26,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741944_1120 (size=6425017) 2024-12-09T13:31:26,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741944_1120 (size=6425017) 2024-12-09T13:31:26,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741945_1121 (size=322274) 2024-12-09T13:31:26,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741945_1121 (size=322274) 2024-12-09T13:31:26,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741945_1121 (size=322274) 2024-12-09T13:31:26,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741946_1122 (size=131440) 2024-12-09T13:31:26,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741946_1122 (size=131440) 2024-12-09T13:31:26,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741946_1122 (size=131440) 2024-12-09T13:31:26,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741947_1123 (size=443172) 2024-12-09T13:31:26,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741947_1123 
(size=443172) 2024-12-09T13:31:26,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741947_1123 (size=443172) 2024-12-09T13:31:26,534 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-09T13:31:26,537 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-12-09T13:31:26,540 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.3 K 2024-12-09T13:31:26,540 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.0 K 2024-12-09T13:31:26,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741948_1124 (size=445) 2024-12-09T13:31:26,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741948_1124 (size=445) 2024-12-09T13:31:26,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741948_1124 (size=445) 2024-12-09T13:31:26,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741949_1125 (size=21) 2024-12-09T13:31:26,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741949_1125 (size=21) 2024-12-09T13:31:26,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741949_1125 (size=21) 2024-12-09T13:31:26,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741950_1126 (size=304107) 2024-12-09T13:31:26,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741950_1126 (size=304107) 2024-12-09T13:31:26,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741950_1126 (size=304107) 2024-12-09T13:31:26,612 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T13:31:26,612 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-09T13:31:26,836 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0002_000001 (auth:SIMPLE) from 127.0.0.1:40310 2024-12-09T13:31:27,099 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T13:31:27,484 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-09T13:31:27,484 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-12-09T13:31:27,485 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:27,486 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-09T13:31:31,225 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region b53a8ca793dfdf20a84d92e1adfa3e8d changed from -1.0 to 0.0, refreshing cache 2024-12-09T13:31:31,226 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 16e0b4fe3cecfaa132154bfd1666f6ee changed from -1.0 to 0.0, refreshing cache 2024-12-09T13:31:32,207 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0002_000001 (auth:SIMPLE) from 127.0.0.1:44024 2024-12-09T13:31:32,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741951_1127 (size=349805) 2024-12-09T13:31:32,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741951_1127 (size=349805) 2024-12-09T13:31:32,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741951_1127 (size=349805) 2024-12-09T13:31:32,987 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T13:31:34,469 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0002_000001 (auth:SIMPLE) from 127.0.0.1:54772 2024-12-09T13:31:34,472 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0002_000001 (auth:SIMPLE) from 127.0.0.1:58304 2024-12-09T13:31:41,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741952_1128 (size=8462) 2024-12-09T13:31:41,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741952_1128 (size=8462) 2024-12-09T13:31:41,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to 
blk_1073741952_1128 (size=8462) 2024-12-09T13:31:41,258 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_1/usercache/jenkins/appcache/application_1733750974572_0002/container_1733750974572_0002_01_000002/launch_container.sh] 2024-12-09T13:31:41,258 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_1/usercache/jenkins/appcache/application_1733750974572_0002/container_1733750974572_0002_01_000002/container_tokens] 2024-12-09T13:31:41,258 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_1/usercache/jenkins/appcache/application_1733750974572_0002/container_1733750974572_0002_01_000002/sysfs] 2024-12-09T13:31:42,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741954_1130 (size=5149) 2024-12-09T13:31:42,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741954_1130 (size=5149) 2024-12-09T13:31:42,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741954_1130 (size=5149) 2024-12-09T13:31:42,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741953_1129 (size=22152) 2024-12-09T13:31:42,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741953_1129 (size=22152) 2024-12-09T13:31:42,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741953_1129 (size=22152) 2024-12-09T13:31:42,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741955_1131 (size=465) 2024-12-09T13:31:42,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741955_1131 (size=465) 2024-12-09T13:31:42,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741955_1131 (size=465) 2024-12-09T13:31:42,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741956_1132 (size=22152) 2024-12-09T13:31:42,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741956_1132 (size=22152) 2024-12-09T13:31:42,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741956_1132 (size=22152) 2024-12-09T13:31:42,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added 
to blk_1073741957_1133 (size=349805) 2024-12-09T13:31:42,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741957_1133 (size=349805) 2024-12-09T13:31:42,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741957_1133 (size=349805) 2024-12-09T13:31:42,799 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0002_000001 (auth:SIMPLE) from 127.0.0.1:39310 2024-12-09T13:31:42,812 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733750974572_0002_01_000003 is : 143 2024-12-09T13:31:42,821 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_0/usercache/jenkins/appcache/application_1733750974572_0002/container_1733750974572_0002_01_000003/launch_container.sh] 2024-12-09T13:31:42,821 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_0/usercache/jenkins/appcache/application_1733750974572_0002/container_1733750974572_0002_01_000003/container_tokens] 2024-12-09T13:31:42,821 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_0/usercache/jenkins/appcache/application_1733750974572_0002/container_1733750974572_0002_01_000003/sysfs] 2024-12-09T13:31:44,793 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-09T13:31:44,794 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
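The export itself is driven by the ExportSnapshot tool; a minimal sketch of an equivalent invocation, assuming the tool's -snapshot/-copy-to/-target options (the snapshot and target names match the log, the destination URI is a placeholder):

// Sketch only: programmatic invocation of the ExportSnapshot tool, roughly
// equivalent to what this test drives. Destination URI is an assumption.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithTargetName",
        "-copy-to", "hdfs://namenode:8020/hbase-export", // placeholder destination
        "-target", "testExportWithTargetName"            // name to use at the destination
    });
    System.exit(rc);
  }
}

The -target option is what gives the exported copy the name testExportWithTargetName rather than snaptb0-testExportWithTargetName, which matches the exported .hbase-snapshot/testExportWithTargetName paths listed later in this log.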
2024-12-09T13:31:44,800 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: testExportWithTargetName 2024-12-09T13:31:44,800 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-09T13:31:44,801 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-09T13:31:44,801 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-09T13:31:44,801 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-12-09T13:31:44,801 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-12-09T13:31:44,801 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751084664/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751084664/.hbase-snapshot/testExportWithTargetName 2024-12-09T13:31:44,802 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751084664/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-12-09T13:31:44,802 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751084664/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-12-09T13:31:44,810 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-12-09T13:31:44,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-12-09T13:31:44,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-09T13:31:44,813 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751104813"}]},"ts":"1733751104813"} 2024-12-09T13:31:44,815 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-12-09T13:31:44,816 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-12-09T13:31:44,816 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-12-09T13:31:44,817 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b53a8ca793dfdf20a84d92e1adfa3e8d, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=16e0b4fe3cecfaa132154bfd1666f6ee, UNASSIGN}] 2024-12-09T13:31:44,818 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=16e0b4fe3cecfaa132154bfd1666f6ee, UNASSIGN 2024-12-09T13:31:44,818 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b53a8ca793dfdf20a84d92e1adfa3e8d, UNASSIGN 2024-12-09T13:31:44,819 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=b53a8ca793dfdf20a84d92e1adfa3e8d, regionState=CLOSING, regionLocation=c48e4b8eb196,36493,1733750968163 2024-12-09T13:31:44,819 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=16e0b4fe3cecfaa132154bfd1666f6ee, regionState=CLOSING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:31:44,821 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=16e0b4fe3cecfaa132154bfd1666f6ee, UNASSIGN because future has completed 2024-12-09T13:31:44,821 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:31:44,821 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure 16e0b4fe3cecfaa132154bfd1666f6ee, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:31:44,821 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b53a8ca793dfdf20a84d92e1adfa3e8d, UNASSIGN because future has completed 2024-12-09T13:31:44,821 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:31:44,822 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure b53a8ca793dfdf20a84d92e1adfa3e8d, server=c48e4b8eb196,36493,1733750968163}] 2024-12-09T13:31:44,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-09T13:31:44,974 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] 
handler.UnassignRegionHandler(122): Close 16e0b4fe3cecfaa132154bfd1666f6ee 2024-12-09T13:31:44,974 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:31:44,974 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1722): Closing 16e0b4fe3cecfaa132154bfd1666f6ee, disabling compactions & flushes 2024-12-09T13:31:44,974 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee. 2024-12-09T13:31:44,974 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee. 2024-12-09T13:31:44,974 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee. after waiting 0 ms 2024-12-09T13:31:44,974 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(122): Close b53a8ca793dfdf20a84d92e1adfa3e8d 2024-12-09T13:31:44,974 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee. 2024-12-09T13:31:44,974 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:31:44,975 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1722): Closing b53a8ca793dfdf20a84d92e1adfa3e8d, disabling compactions & flushes 2024-12-09T13:31:44,975 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d. 2024-12-09T13:31:44,975 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d. 2024-12-09T13:31:44,975 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d. after waiting 0 ms 2024-12-09T13:31:44,975 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d. 
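The DisableTableProcedure / region-close cascade above is what a client-side disable call triggers; a minimal sketch, assuming a standard Connection/Admin setup (only the table name is taken from the log):

// Sketch only: client call that kicks off DisableTableProcedure and the
// CloseTableRegionsProcedure / CloseRegionProcedure chain seen above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportWithTargetName");
      // Blocks until the master reports the procedure as done (the repeated
      // "Checking to see if procedure is done pid=..." lines in this log).
      admin.disableTable(tn);
    }
  }
}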
2024-12-09T13:31:44,982 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/16e0b4fe3cecfaa132154bfd1666f6ee/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T13:31:44,982 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/b53a8ca793dfdf20a84d92e1adfa3e8d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T13:31:44,983 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:31:44,983 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:31:44,983 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee. 2024-12-09T13:31:44,983 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d. 2024-12-09T13:31:44,983 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1676): Region close journal for 16e0b4fe3cecfaa132154bfd1666f6ee: Waiting for close lock at 1733751104974Running coprocessor pre-close hooks at 1733751104974Disabling compacts and flushes for region at 1733751104974Disabling writes for close at 1733751104974Writing region close event to WAL at 1733751104975 (+1 ms)Running coprocessor post-close hooks at 1733751104983 (+8 ms)Closed at 1733751104983 2024-12-09T13:31:44,983 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1676): Region close journal for b53a8ca793dfdf20a84d92e1adfa3e8d: Waiting for close lock at 1733751104974Running coprocessor pre-close hooks at 1733751104974Disabling compacts and flushes for region at 1733751104975 (+1 ms)Disabling writes for close at 1733751104975Writing region close event to WAL at 1733751104976 (+1 ms)Running coprocessor post-close hooks at 1733751104983 (+7 ms)Closed at 1733751104983 2024-12-09T13:31:44,985 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(157): Closed b53a8ca793dfdf20a84d92e1adfa3e8d 2024-12-09T13:31:44,986 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=b53a8ca793dfdf20a84d92e1adfa3e8d, regionState=CLOSED 2024-12-09T13:31:44,986 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(157): Closed 16e0b4fe3cecfaa132154bfd1666f6ee 2024-12-09T13:31:44,987 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=16e0b4fe3cecfaa132154bfd1666f6ee, regionState=CLOSED 2024-12-09T13:31:44,988 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure b53a8ca793dfdf20a84d92e1adfa3e8d, server=c48e4b8eb196,36493,1733750968163 because future has completed 2024-12-09T13:31:44,989 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure 16e0b4fe3cecfaa132154bfd1666f6ee, server=c48e4b8eb196,39211,1733750968235 because future has completed 2024-12-09T13:31:44,993 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=58 2024-12-09T13:31:44,993 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=58, state=SUCCESS, hasLock=false; CloseRegionProcedure b53a8ca793dfdf20a84d92e1adfa3e8d, server=c48e4b8eb196,36493,1733750968163 in 169 msec 2024-12-09T13:31:44,994 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=60, resume processing ppid=59 2024-12-09T13:31:44,994 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=59, state=SUCCESS, hasLock=false; CloseRegionProcedure 16e0b4fe3cecfaa132154bfd1666f6ee, server=c48e4b8eb196,39211,1733750968235 in 170 msec 2024-12-09T13:31:44,995 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b53a8ca793dfdf20a84d92e1adfa3e8d, UNASSIGN in 176 msec 2024-12-09T13:31:44,996 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=59, resume processing ppid=57 2024-12-09T13:31:44,996 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=16e0b4fe3cecfaa132154bfd1666f6ee, UNASSIGN in 177 msec 2024-12-09T13:31:44,999 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=57, resume processing ppid=56 2024-12-09T13:31:44,999 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=57, ppid=56, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 181 msec 2024-12-09T13:31:45,000 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751105000"}]},"ts":"1733751105000"} 2024-12-09T13:31:45,002 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-12-09T13:31:45,002 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-12-09T13:31:45,004 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 193 msec 2024-12-09T13:31:45,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-09T13:31:45,134 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-09T13:31:45,135 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] 
master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-12-09T13:31:45,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-09T13:31:45,138 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-09T13:31:45,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-12-09T13:31:45,140 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-09T13:31:45,143 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-12-09T13:31:45,145 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/16e0b4fe3cecfaa132154bfd1666f6ee 2024-12-09T13:31:45,145 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/b53a8ca793dfdf20a84d92e1adfa3e8d 2024-12-09T13:31:45,147 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/b53a8ca793dfdf20a84d92e1adfa3e8d/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/b53a8ca793dfdf20a84d92e1adfa3e8d/recovered.edits] 2024-12-09T13:31:45,148 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/16e0b4fe3cecfaa132154bfd1666f6ee/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/16e0b4fe3cecfaa132154bfd1666f6ee/recovered.edits] 2024-12-09T13:31:45,152 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/16e0b4fe3cecfaa132154bfd1666f6ee/cf/5b05a2df3799456f86d86975af6f3e15 to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportWithTargetName/16e0b4fe3cecfaa132154bfd1666f6ee/cf/5b05a2df3799456f86d86975af6f3e15 2024-12-09T13:31:45,152 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/b53a8ca793dfdf20a84d92e1adfa3e8d/cf/cf5ed4ba0a99436e8b116a9f7a39d936 to 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportWithTargetName/b53a8ca793dfdf20a84d92e1adfa3e8d/cf/cf5ed4ba0a99436e8b116a9f7a39d936 2024-12-09T13:31:45,155 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/16e0b4fe3cecfaa132154bfd1666f6ee/recovered.edits/9.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportWithTargetName/16e0b4fe3cecfaa132154bfd1666f6ee/recovered.edits/9.seqid 2024-12-09T13:31:45,156 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/b53a8ca793dfdf20a84d92e1adfa3e8d/recovered.edits/9.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportWithTargetName/b53a8ca793dfdf20a84d92e1adfa3e8d/recovered.edits/9.seqid 2024-12-09T13:31:45,156 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/16e0b4fe3cecfaa132154bfd1666f6ee 2024-12-09T13:31:45,156 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithTargetName/b53a8ca793dfdf20a84d92e1adfa3e8d 2024-12-09T13:31:45,156 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-12-09T13:31:45,159 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-09T13:31:45,163 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-12-09T13:31:45,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-09T13:31:45,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-09T13:31:45,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-09T13:31:45,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-09T13:31:45,173 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-09T13:31:45,173 
DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-09T13:31:45,173 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-09T13:31:45,173 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-09T13:31:45,174 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithTargetName' descriptor. 2024-12-09T13:31:45,176 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-09T13:31:45,176 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states. 2024-12-09T13:31:45,176 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751105176"}]},"ts":"9223372036854775807"} 2024-12-09T13:31:45,176 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751105176"}]},"ts":"9223372036854775807"} 2024-12-09T13:31:45,179 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-09T13:31:45,179 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => b53a8ca793dfdf20a84d92e1adfa3e8d, NAME => 'testtb-testExportWithTargetName,,1733751082469.b53a8ca793dfdf20a84d92e1adfa3e8d.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 16e0b4fe3cecfaa132154bfd1666f6ee, NAME => 'testtb-testExportWithTargetName,1,1733751082469.16e0b4fe3cecfaa132154bfd1666f6ee.', STARTKEY => '1', ENDKEY => ''}] 2024-12-09T13:31:45,179 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted. 
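The surrounding records trace DeleteTableProcedure pid=62 archiving the two regions of testtb-testExportWithTargetName, removing them from hbase:meta and from the region states, and clearing the table's ACL znode; a few records later the matching snapshots are deleted as well. The test harness code itself is not part of this log, so the following is only a minimal sketch, assuming the standard HBase 2.x Admin API, of the client-side calls that would drive these steps:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportWithTargetName");
      // A table must be disabled before it can be deleted; the test
      // presumably disabled it earlier (not shown in this excerpt).
      if (admin.isTableEnabled(tn)) {
        admin.disableTable(tn);
      }
      // Submits a DeleteTableProcedure on the master (pid=62 in this run)
      // and blocks until it completes.
      admin.deleteTable(tn);

      // Drop the snapshots that the later log records show being deleted.
      admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
      admin.deleteSnapshot("snaptb0-testExportWithTargetName");
    }
  }
}
```

Because deleteTable waits for the master-side procedure to report SUCCESS, the client-side "Operation: DELETE ... completed" record that appears a few lines below marks the point where such a call would return.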
2024-12-09T13:31:45,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-09T13:31:45,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-09T13:31:45,180 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733751105179"}]},"ts":"9223372036854775807"} 2024-12-09T13:31:45,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-09T13:31:45,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:31:45,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:31:45,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-09T13:31:45,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:31:45,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:31:45,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-12-09T13:31:45,183 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-12-09T13:31:45,184 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-09T13:31:45,185 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 49 msec 2024-12-09T13:31:45,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-12-09T13:31:45,295 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-12-09T13:31:45,296 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-09T13:31:45,311 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-12-09T13:31:45,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-12-09T13:31:45,318 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-12-09T13:31:45,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-12-09T13:31:45,345 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=795 (was 766) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:55154 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43301 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43105 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39825 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 127705) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data4 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_182006402_1 at /127.0.0.1:60052 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:47116 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:60070 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (294847031) connection to localhost/127.0.0.1:43105 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/c48e4b8eb196:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (294847031) connection to localhost/127.0.0.1:43301 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33133 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41669 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: Thread-2133 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/c48e4b8eb196:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_182006402_1 at /127.0.0.1:55146 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=807 (was 807), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=527 (was 376) - SystemLoadAverage LEAK? -, ProcessCount=18 (was 17) - ProcessCount LEAK? -, AvailableMemoryMB=2224 (was 565) - AvailableMemoryMB LEAK? 
- 2024-12-09T13:31:45,345 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=795 is superior to 500 2024-12-09T13:31:45,361 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=795, OpenFileDescriptor=807, MaxFileDescriptor=1048576, SystemLoadAverage=527, ProcessCount=18, AvailableMemoryMB=2224 2024-12-09T13:31:45,361 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=795 is superior to 500 2024-12-09T13:31:45,363 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T13:31:45,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-12-09T13:31:45,365 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T13:31:45,366 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:31:45,366 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 63 2024-12-09T13:31:45,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-09T13:31:45,367 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T13:31:45,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741958_1134 (size=404) 2024-12-09T13:31:45,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741958_1134 (size=404) 2024-12-09T13:31:45,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741958_1134 (size=404) 2024-12-09T13:31:45,378 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => f0828c080a2e915ab2af5a6e6615ed13, NAME => 'testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:31:45,378 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 585b6e74652cbb1b9e8003901efa080f, NAME => 'testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:31:45,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741960_1136 (size=65) 2024-12-09T13:31:45,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741960_1136 (size=65) 2024-12-09T13:31:45,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741960_1136 (size=65) 2024-12-09T13:31:45,390 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:31:45,390 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 585b6e74652cbb1b9e8003901efa080f, disabling compactions & flushes 2024-12-09T13:31:45,390 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f. 2024-12-09T13:31:45,390 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f. 2024-12-09T13:31:45,390 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f. after waiting 0 ms 2024-12-09T13:31:45,390 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f. 2024-12-09T13:31:45,390 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f. 
2024-12-09T13:31:45,390 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 585b6e74652cbb1b9e8003901efa080f: Waiting for close lock at 1733751105390Disabling compacts and flushes for region at 1733751105390Disabling writes for close at 1733751105390Writing region close event to WAL at 1733751105390Closed at 1733751105390 2024-12-09T13:31:45,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741959_1135 (size=65) 2024-12-09T13:31:45,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741959_1135 (size=65) 2024-12-09T13:31:45,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741959_1135 (size=65) 2024-12-09T13:31:45,396 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:31:45,396 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing f0828c080a2e915ab2af5a6e6615ed13, disabling compactions & flushes 2024-12-09T13:31:45,396 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13. 2024-12-09T13:31:45,396 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13. 2024-12-09T13:31:45,396 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13. after waiting 0 ms 2024-12-09T13:31:45,396 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13. 2024-12-09T13:31:45,396 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13. 
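The create request logged above ('testtb-testExportWithResetTtl' with REGION_REPLICATION => '1', a single 'cf' family with VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 64KB, and a split at '1' producing two regions) maps fairly directly onto the HBase 2.x descriptor builders. The sketch below shows that mapping; it is not the actual test code, and the Admin handle is assumed to be open already:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateResetTtlTable {
  // Assumes an open Admin, e.g. ConnectionFactory.createConnection(conf).getAdmin().
  static void create(Admin admin) throws IOException {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testExportWithResetTtl"))
        .setRegionReplication(1)                           // REGION_REPLICATION => '1'
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)                             // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)             // BLOOMFILTER => 'ROW'
            .setBlocksize(64 * 1024)                       // BLOCKSIZE => '65536 B (64KB)'
            // TTL => 'FOREVER' is the default, so it is not set explicitly.
            .build())
        .build();
    // A single split key "1" yields the two regions logged above:
    // ('' -> '1') for f0828c08... and ('1' -> '') for 585b6e74...
    byte[][] splits = { Bytes.toBytes("1") };
    admin.createTable(desc, splits);   // stores a CreateTableProcedure (pid=63 in this run)
  }
}
```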
2024-12-09T13:31:45,396 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for f0828c080a2e915ab2af5a6e6615ed13: Waiting for close lock at 1733751105396Disabling compacts and flushes for region at 1733751105396Disabling writes for close at 1733751105396Writing region close event to WAL at 1733751105396Closed at 1733751105396 2024-12-09T13:31:45,397 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T13:31:45,398 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733751105397"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733751105397"}]},"ts":"1733751105397"} 2024-12-09T13:31:45,398 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733751105397"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733751105397"}]},"ts":"1733751105397"} 2024-12-09T13:31:45,400 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-09T13:31:45,401 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T13:31:45,402 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751105401"}]},"ts":"1733751105401"} 2024-12-09T13:31:45,404 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-09T13:31:45,404 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {c48e4b8eb196=0} racks are {/default-rack=0} 2024-12-09T13:31:45,406 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T13:31:45,406 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T13:31:45,406 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T13:31:45,406 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T13:31:45,406 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T13:31:45,406 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T13:31:45,406 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T13:31:45,406 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T13:31:45,406 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T13:31:45,406 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T13:31:45,406 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=f0828c080a2e915ab2af5a6e6615ed13, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=585b6e74652cbb1b9e8003901efa080f, ASSIGN}] 2024-12-09T13:31:45,408 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=f0828c080a2e915ab2af5a6e6615ed13, ASSIGN 2024-12-09T13:31:45,408 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=585b6e74652cbb1b9e8003901efa080f, ASSIGN 2024-12-09T13:31:45,409 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=f0828c080a2e915ab2af5a6e6615ed13, ASSIGN; state=OFFLINE, location=c48e4b8eb196,36493,1733750968163; forceNewPlan=false, retain=false 2024-12-09T13:31:45,409 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=585b6e74652cbb1b9e8003901efa080f, ASSIGN; state=OFFLINE, location=c48e4b8eb196,44683,1733750968041; forceNewPlan=false, retain=false 2024-12-09T13:31:45,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-09T13:31:45,559 INFO [c48e4b8eb196:45049 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
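While the assignment subprocedures (pid=64 through 67) run, the master keeps logging "Checking to see if procedure is done pid=63", which appears to be the client polling for completion of the create. As an assumption about how that polling arises, the non-blocking variant of the same call looks roughly like this (the timeout value is arbitrary):

```java
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class CreateAndWait {
  // 'desc' and 'splits' as in the previous sketch.
  static void createAndWait(Admin admin, TableDescriptor desc, byte[][] splits) throws Exception {
    Future<Void> done = admin.createTableAsync(desc, splits);
    // get() polls the master until the procedure finishes, which is what the
    // repeated "Checking to see if procedure is done pid=63" records reflect.
    done.get(2, TimeUnit.MINUTES);
    if (!admin.isTableAvailable(TableName.valueOf("testtb-testExportWithResetTtl"))) {
      throw new IllegalStateException("table did not come online");
    }
  }
}
```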
2024-12-09T13:31:45,560 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=f0828c080a2e915ab2af5a6e6615ed13, regionState=OPENING, regionLocation=c48e4b8eb196,36493,1733750968163 2024-12-09T13:31:45,560 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=585b6e74652cbb1b9e8003901efa080f, regionState=OPENING, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:31:45,562 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=f0828c080a2e915ab2af5a6e6615ed13, ASSIGN because future has completed 2024-12-09T13:31:45,562 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=66, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure f0828c080a2e915ab2af5a6e6615ed13, server=c48e4b8eb196,36493,1733750968163}] 2024-12-09T13:31:45,563 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=585b6e74652cbb1b9e8003901efa080f, ASSIGN because future has completed 2024-12-09T13:31:45,563 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure 585b6e74652cbb1b9e8003901efa080f, server=c48e4b8eb196,44683,1733750968041}] 2024-12-09T13:31:45,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-09T13:31:45,719 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13. 2024-12-09T13:31:45,719 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7752): Opening region: {ENCODED => f0828c080a2e915ab2af5a6e6615ed13, NAME => 'testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T13:31:45,719 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13. service=AccessControlService 2024-12-09T13:31:45,720 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T13:31:45,720 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl f0828c080a2e915ab2af5a6e6615ed13 2024-12-09T13:31:45,720 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:31:45,720 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7794): checking encryption for f0828c080a2e915ab2af5a6e6615ed13 2024-12-09T13:31:45,720 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7797): checking classloading for f0828c080a2e915ab2af5a6e6615ed13 2024-12-09T13:31:45,720 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f. 2024-12-09T13:31:45,721 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7752): Opening region: {ENCODED => 585b6e74652cbb1b9e8003901efa080f, NAME => 'testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T13:31:45,721 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f. service=AccessControlService 2024-12-09T13:31:45,721 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
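Each region open above registers AccessControlService and loads org.apache.hadoop.hbase.security.access.AccessController as a system coprocessor, as expected for a secure-cluster test. The exact TestSecureExportSnapshot setup is not shown in this log; as an assumption, a configuration along the following lines would produce that coprocessor loading (Kerberos and token settings used by the real test are omitted):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SecureClusterConf {
  // Sketch only: enables authorization and installs AccessController on the
  // master, the regionservers, and every region, matching the coprocessor
  // load records above.
  static Configuration accessControlledConf() {
    Configuration conf = HBaseConfiguration.create();
    String ac = "org.apache.hadoop.hbase.security.access.AccessController";
    conf.setBoolean("hbase.security.authorization", true);
    conf.set("hbase.coprocessor.master.classes", ac);
    conf.set("hbase.coprocessor.region.classes", ac);
    conf.set("hbase.coprocessor.regionserver.classes", ac);
    return conf;
  }
}
```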
2024-12-09T13:31:45,721 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 585b6e74652cbb1b9e8003901efa080f 2024-12-09T13:31:45,721 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:31:45,722 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7794): checking encryption for 585b6e74652cbb1b9e8003901efa080f 2024-12-09T13:31:45,722 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7797): checking classloading for 585b6e74652cbb1b9e8003901efa080f 2024-12-09T13:31:45,722 INFO [StoreOpener-f0828c080a2e915ab2af5a6e6615ed13-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f0828c080a2e915ab2af5a6e6615ed13 2024-12-09T13:31:45,724 INFO [StoreOpener-585b6e74652cbb1b9e8003901efa080f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 585b6e74652cbb1b9e8003901efa080f 2024-12-09T13:31:45,725 INFO [StoreOpener-f0828c080a2e915ab2af5a6e6615ed13-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f0828c080a2e915ab2af5a6e6615ed13 columnFamilyName cf 2024-12-09T13:31:45,725 DEBUG [StoreOpener-f0828c080a2e915ab2af5a6e6615ed13-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:31:45,725 INFO [StoreOpener-585b6e74652cbb1b9e8003901efa080f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
585b6e74652cbb1b9e8003901efa080f columnFamilyName cf 2024-12-09T13:31:45,726 DEBUG [StoreOpener-585b6e74652cbb1b9e8003901efa080f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:31:45,726 INFO [StoreOpener-f0828c080a2e915ab2af5a6e6615ed13-1 {}] regionserver.HStore(327): Store=f0828c080a2e915ab2af5a6e6615ed13/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:31:45,726 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1038): replaying wal for f0828c080a2e915ab2af5a6e6615ed13 2024-12-09T13:31:45,727 INFO [StoreOpener-585b6e74652cbb1b9e8003901efa080f-1 {}] regionserver.HStore(327): Store=585b6e74652cbb1b9e8003901efa080f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:31:45,727 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/f0828c080a2e915ab2af5a6e6615ed13 2024-12-09T13:31:45,727 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1038): replaying wal for 585b6e74652cbb1b9e8003901efa080f 2024-12-09T13:31:45,728 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/f0828c080a2e915ab2af5a6e6615ed13 2024-12-09T13:31:45,728 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/585b6e74652cbb1b9e8003901efa080f 2024-12-09T13:31:45,728 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1048): stopping wal replay for f0828c080a2e915ab2af5a6e6615ed13 2024-12-09T13:31:45,728 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/585b6e74652cbb1b9e8003901efa080f 2024-12-09T13:31:45,728 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1060): Cleaning up temporary data for f0828c080a2e915ab2af5a6e6615ed13 2024-12-09T13:31:45,729 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1048): stopping wal replay for 585b6e74652cbb1b9e8003901efa080f 2024-12-09T13:31:45,729 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1060): Cleaning up temporary data for 585b6e74652cbb1b9e8003901efa080f 2024-12-09T13:31:45,730 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] 
regionserver.HRegion(1093): writing seq id for f0828c080a2e915ab2af5a6e6615ed13 2024-12-09T13:31:45,731 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1093): writing seq id for 585b6e74652cbb1b9e8003901efa080f 2024-12-09T13:31:45,733 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/f0828c080a2e915ab2af5a6e6615ed13/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:31:45,733 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/585b6e74652cbb1b9e8003901efa080f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:31:45,733 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1114): Opened 585b6e74652cbb1b9e8003901efa080f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60342784, jitterRate=-0.10082244873046875}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:31:45,733 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1114): Opened f0828c080a2e915ab2af5a6e6615ed13; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68855995, jitterRate=0.02603428065776825}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:31:45,733 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 585b6e74652cbb1b9e8003901efa080f 2024-12-09T13:31:45,733 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f0828c080a2e915ab2af5a6e6615ed13 2024-12-09T13:31:45,734 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1006): Region open journal for 585b6e74652cbb1b9e8003901efa080f: Running coprocessor pre-open hook at 1733751105722Writing region info on filesystem at 1733751105722Initializing all the Stores at 1733751105723 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751105723Cleaning up temporary data from old regions at 1733751105729 (+6 ms)Running coprocessor post-open hooks at 1733751105733 (+4 ms)Region opened successfully at 1733751105734 (+1 ms) 2024-12-09T13:31:45,734 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1006): Region open journal for f0828c080a2e915ab2af5a6e6615ed13: Running coprocessor pre-open hook at 1733751105720Writing region info on filesystem at 1733751105720Initializing all the Stores at 1733751105722 (+2 ms)Instantiating 
store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751105722Cleaning up temporary data from old regions at 1733751105728 (+6 ms)Running coprocessor post-open hooks at 1733751105733 (+5 ms)Region opened successfully at 1733751105734 (+1 ms) 2024-12-09T13:31:45,734 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13., pid=66, masterSystemTime=1733751105714 2024-12-09T13:31:45,735 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f., pid=67, masterSystemTime=1733751105715 2024-12-09T13:31:45,736 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f. 2024-12-09T13:31:45,736 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f. 2024-12-09T13:31:45,737 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=585b6e74652cbb1b9e8003901efa080f, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:31:45,737 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13. 2024-12-09T13:31:45,737 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13. 
2024-12-09T13:31:45,737 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=f0828c080a2e915ab2af5a6e6615ed13, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,36493,1733750968163 2024-12-09T13:31:45,738 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=67, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure 585b6e74652cbb1b9e8003901efa080f, server=c48e4b8eb196,44683,1733750968041 because future has completed 2024-12-09T13:31:45,739 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=66, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure f0828c080a2e915ab2af5a6e6615ed13, server=c48e4b8eb196,36493,1733750968163 because future has completed 2024-12-09T13:31:45,740 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=67, resume processing ppid=65 2024-12-09T13:31:45,740 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=65, state=SUCCESS, hasLock=false; OpenRegionProcedure 585b6e74652cbb1b9e8003901efa080f, server=c48e4b8eb196,44683,1733750968041 in 176 msec 2024-12-09T13:31:45,741 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=66, resume processing ppid=64 2024-12-09T13:31:45,741 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, ppid=64, state=SUCCESS, hasLock=false; OpenRegionProcedure f0828c080a2e915ab2af5a6e6615ed13, server=c48e4b8eb196,36493,1733750968163 in 178 msec 2024-12-09T13:31:45,742 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=585b6e74652cbb1b9e8003901efa080f, ASSIGN in 334 msec 2024-12-09T13:31:45,743 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=64, resume processing ppid=63 2024-12-09T13:31:45,743 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=f0828c080a2e915ab2af5a6e6615ed13, ASSIGN in 335 msec 2024-12-09T13:31:45,744 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T13:31:45,744 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751105744"}]},"ts":"1733751105744"} 2024-12-09T13:31:45,746 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-09T13:31:45,746 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T13:31:45,746 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-12-09T13:31:45,749 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-09T13:31:45,805 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:31:45,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:31:45,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:31:45,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:31:45,813 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T13:31:45,814 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T13:31:45,814 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T13:31:45,814 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T13:31:45,817 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 450 msec 2024-12-09T13:31:45,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-09T13:31:45,993 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-09T13:31:45,994 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-09T13:31:45,994 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:31:46,000 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-09T13:31:46,001 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:31:46,001 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithResetTtl assigned. 
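[Editorial note, not part of the log] The records up to this point trace CreateTableProcedure pid=63 for testtb-testExportWithResetTtl: two regions split at row key '1' are opened and ASSIGNed, the table is marked ENABLED in hbase:meta, and the "jenkins: RWXCA" permission is written and propagated through /hbase/acl. The test source itself is not shown in this log, so the following is only a hedged sketch of the client-side equivalent using the standard HBase Admin API; the table name, the 'cf' family and the single split key come from the log, everything else (class name, configuration handling) is an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateExportTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
          // Single column family 'cf', matching the store descriptor logged by HStore above.
          TableDescriptorBuilder desc = TableDescriptorBuilder.newBuilder(table)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"));
          // One split key => two regions: ['', '1') and ['1', ''), as in the ASSIGN procedures above.
          byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
          admin.createTable(desc.build(), splitKeys);        // returns once CreateTableProcedure completes
        }
      }
    }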
2024-12-09T13:31:46,001 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T13:31:46,006 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-09T13:31:46,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733751106006 (current time:1733751106006). 2024-12-09T13:31:46,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T13:31:46,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-09T13:31:46,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T13:31:46,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74a87752, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:46,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:31:46,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:31:46,008 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:31:46,008 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:31:46,008 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:31:46,008 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75104b82, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:46,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:31:46,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:31:46,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:46,010 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:33878, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:31:46,010 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a415441, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:46,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:31:46,011 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:31:46,012 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:31:46,013 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52764, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:31:46,014 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049. 2024-12-09T13:31:46,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:31:46,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:46,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:46,014 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T13:31:46,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76af04dc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:46,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:31:46,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:31:46,015 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:31:46,016 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:31:46,016 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:31:46,016 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6564cc6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:46,016 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:31:46,016 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:31:46,016 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:46,017 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33890, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:31:46,017 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@521292bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:46,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:31:46,019 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:31:46,019 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:31:46,020 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52770, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-09T13:31:46,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:31:46,023 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049. 2024-12-09T13:31:46,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:31:46,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:46,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:46,024 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:31:46,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-09T13:31:46,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T13:31:46,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-09T13:31:46,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-09T13:31:46,027 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T13:31:46,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-09T13:31:46,028 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T13:31:46,030 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T13:31:46,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741961_1137 (size=161) 2024-12-09T13:31:46,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741961_1137 (size=161) 2024-12-09T13:31:46,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741961_1137 (size=161) 2024-12-09T13:31:46,037 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T13:31:46,037 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f0828c080a2e915ab2af5a6e6615ed13}, {pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 585b6e74652cbb1b9e8003901efa080f}] 2024-12-09T13:31:46,038 INFO [PEWorker-3 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f0828c080a2e915ab2af5a6e6615ed13 2024-12-09T13:31:46,038 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 585b6e74652cbb1b9e8003901efa080f 2024-12-09T13:31:46,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-09T13:31:46,191 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36493 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-12-09T13:31:46,191 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44683 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-12-09T13:31:46,192 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13. 2024-12-09T13:31:46,192 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f. 2024-12-09T13:31:46,192 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2603): Flush status journal for f0828c080a2e915ab2af5a6e6615ed13: 2024-12-09T13:31:46,192 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-09T13:31:46,193 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2603): Flush status journal for 585b6e74652cbb1b9e8003901efa080f: 2024-12-09T13:31:46,193 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-09T13:31:46,193 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13.' 
region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-09T13:31:46,193 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:31:46,193 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T13:31:46,193 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-09T13:31:46,193 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:31:46,193 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T13:31:46,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741962_1138 (size=68) 2024-12-09T13:31:46,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741962_1138 (size=68) 2024-12-09T13:31:46,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741962_1138 (size=68) 2024-12-09T13:31:46,206 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f. 2024-12-09T13:31:46,206 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-09T13:31:46,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741963_1139 (size=68) 2024-12-09T13:31:46,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=70 2024-12-09T13:31:46,207 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 585b6e74652cbb1b9e8003901efa080f 2024-12-09T13:31:46,207 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 585b6e74652cbb1b9e8003901efa080f 2024-12-09T13:31:46,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741963_1139 (size=68) 2024-12-09T13:31:46,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741963_1139 (size=68) 2024-12-09T13:31:46,208 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13. 
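[Editorial note, not part of the log] The SnapshotRegionProcedure records above belong to the master-side SnapshotProcedure pid=68 for the FLUSH-type snapshot emptySnaptb0-testExportWithResetTtl requested a few lines earlier. For orientation only, and not taken from the test source: the client call that triggers such a procedure is normally a single Admin.snapshot(...) invocation that blocks until the procedure's SNAPSHOT_POST_OPERATION state is reached. A minimal sketch, with the snapshot name, table name and FLUSH type copied from the log and the connection handling assumed:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
          // FLUSH-type snapshot, matching "type=FLUSH ttl=0" in the snapshot request logged above.
          admin.snapshot("emptySnaptb0-testExportWithResetTtl",
              TableName.valueOf("testtb-testExportWithResetTtl"),
              SnapshotType.FLUSH);
        }
      }
    }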
2024-12-09T13:31:46,208 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-12-09T13:31:46,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=69 2024-12-09T13:31:46,209 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region f0828c080a2e915ab2af5a6e6615ed13 2024-12-09T13:31:46,209 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f0828c080a2e915ab2af5a6e6615ed13 2024-12-09T13:31:46,210 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 585b6e74652cbb1b9e8003901efa080f in 171 msec 2024-12-09T13:31:46,212 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=69, resume processing ppid=68 2024-12-09T13:31:46,212 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f0828c080a2e915ab2af5a6e6615ed13 in 173 msec 2024-12-09T13:31:46,212 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T13:31:46,212 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T13:31:46,213 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T13:31:46,213 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-12-09T13:31:46,214 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-12-09T13:31:46,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741964_1140 (size=543) 2024-12-09T13:31:46,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741964_1140 (size=543) 2024-12-09T13:31:46,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741964_1140 (size=543) 2024-12-09T13:31:46,227 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T13:31:46,232 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T13:31:46,232 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-12-09T13:31:46,234 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T13:31:46,234 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-09T13:31:46,235 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 209 msec 2024-12-09T13:31:46,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-09T13:31:46,343 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-09T13:31:46,351 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='0e318e986a966378de9d40db2c12a0dd8', locateType=CURRENT is [region=testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13., hostname=c48e4b8eb196,36493,1733750968163, seqNum=2] 2024-12-09T13:31:46,353 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='129f0949cb1f29cca7a4e23410b845d78', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f., hostname=c48e4b8eb196,44683,1733750968041, seqNum=2] 2024-12-09T13:31:46,354 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='2e2649547adac02755d389637ddee62c3', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f., hostname=c48e4b8eb196,44683,1733750968041, seqNum=2] 2024-12-09T13:31:46,356 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='332fa4fe75e92f9cbcd4db931eea9b44d', 
locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f., hostname=c48e4b8eb196,44683,1733750968041, seqNum=2] 2024-12-09T13:31:46,357 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='4fd0850aafd43aa8ca72bdf3f06aaa7ec', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f., hostname=c48e4b8eb196,44683,1733750968041, seqNum=2] 2024-12-09T13:31:46,359 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:31:46,361 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36493 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T13:31:46,361 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44012, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:31:46,364 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T13:31:46,365 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T13:31:46,368 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-09T13:31:46,368 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13. 2024-12-09T13:31:46,369 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:31:46,371 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T13:31:46,376 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T13:31:46,384 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T13:31:46,388 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-09T13:31:46,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733751106388 (current time:1733751106388). 
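[Editorial note, not part of the log] The two HRegion(8528) messages above ("writing data to region ... with WAL disabled. Data may be lost in the event of a crash.") are what HBase prints when puts arrive with SKIP_WAL durability, so the test evidently loads its rows without write-ahead logging before taking the second snapshot. The actual rows and counts are not recoverable from this log; the sketch below is only an illustration of issuing such puts, with hypothetical row keys, qualifier and count.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LoadRowsWithoutWal {
      public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = connection.getTable(TableName.valueOf("testtb-testExportWithResetTtl"))) {
          for (int i = 0; i < 50; i++) {                    // hypothetical row count
            Put put = new Put(Bytes.toBytes(String.format("%02d-row", i)));   // hypothetical row keys
            put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
            put.setDurability(Durability.SKIP_WAL);         // produces the "WAL disabled" warning seen above
            table.put(put);
          }
        }
      }
    }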
2024-12-09T13:31:46,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T13:31:46,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-09T13:31:46,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T13:31:46,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39681b3f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:46,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:31:46,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:31:46,390 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:31:46,390 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:31:46,390 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:31:46,390 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72eb8549, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:46,391 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:31:46,391 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:31:46,391 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:46,392 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33912, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:31:46,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@175d106, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:46,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:31:46,394 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:31:46,394 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:31:46,395 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52776, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:31:46,397 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049. 2024-12-09T13:31:46,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:31:46,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:46,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:46,397 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T13:31:46,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13fe2986, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:46,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:31:46,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:31:46,399 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:31:46,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:31:46,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:31:46,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68589724, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:46,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:31:46,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:31:46,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:46,401 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33934, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:31:46,402 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ee372e3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:46,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:31:46,404 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:31:46,404 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:31:46,406 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52788, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-09T13:31:46,408 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:31:46,409 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049. 2024-12-09T13:31:46,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:31:46,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:46,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:46,409 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:31:46,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-09T13:31:46,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T13:31:46,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-09T13:31:46,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-09T13:31:46,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-09T13:31:46,413 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T13:31:46,414 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T13:31:46,416 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T13:31:46,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741965_1141 (size=156) 2024-12-09T13:31:46,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741965_1141 (size=156) 2024-12-09T13:31:46,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741965_1141 (size=156) 2024-12-09T13:31:46,422 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T13:31:46,423 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f0828c080a2e915ab2af5a6e6615ed13}, {pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 585b6e74652cbb1b9e8003901efa080f}] 2024-12-09T13:31:46,423 INFO [PEWorker-3 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f0828c080a2e915ab2af5a6e6615ed13 2024-12-09T13:31:46,424 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 585b6e74652cbb1b9e8003901efa080f 2024-12-09T13:31:46,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-09T13:31:46,575 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36493 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-12-09T13:31:46,575 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44683 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-12-09T13:31:46,576 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f. 2024-12-09T13:31:46,576 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13. 2024-12-09T13:31:46,576 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2902): Flushing f0828c080a2e915ab2af5a6e6615ed13 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-09T13:31:46,576 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2902): Flushing 585b6e74652cbb1b9e8003901efa080f 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-09T13:31:46,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/f0828c080a2e915ab2af5a6e6615ed13/.tmp/cf/4cf5ce2e85b649568d5f26a6fc08e313 is 71, key is 0dd8e4035617b9063db57a9646666e47/cf:q/1733751106361/Put/seqid=0 2024-12-09T13:31:46,597 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/585b6e74652cbb1b9e8003901efa080f/.tmp/cf/d0e74845a4a142f6b053735307f01583 is 71, key is 1507efaa38cf4d7a8cc007d298b783f4/cf:q/1733751106364/Put/seqid=0 2024-12-09T13:31:46,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741967_1143 (size=8394) 2024-12-09T13:31:46,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741966_1142 (size=5216) 2024-12-09T13:31:46,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741967_1143 (size=8394) 2024-12-09T13:31:46,625 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741967_1143 (size=8394) 2024-12-09T13:31:46,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741966_1142 (size=5216) 2024-12-09T13:31:46,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741966_1142 (size=5216) 2024-12-09T13:31:46,625 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/f0828c080a2e915ab2af5a6e6615ed13/.tmp/cf/4cf5ce2e85b649568d5f26a6fc08e313 2024-12-09T13:31:46,625 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/585b6e74652cbb1b9e8003901efa080f/.tmp/cf/d0e74845a4a142f6b053735307f01583 2024-12-09T13:31:46,632 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/585b6e74652cbb1b9e8003901efa080f/.tmp/cf/d0e74845a4a142f6b053735307f01583 as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/585b6e74652cbb1b9e8003901efa080f/cf/d0e74845a4a142f6b053735307f01583 2024-12-09T13:31:46,632 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/f0828c080a2e915ab2af5a6e6615ed13/.tmp/cf/4cf5ce2e85b649568d5f26a6fc08e313 as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/f0828c080a2e915ab2af5a6e6615ed13/cf/4cf5ce2e85b649568d5f26a6fc08e313 2024-12-09T13:31:46,639 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/f0828c080a2e915ab2af5a6e6615ed13/cf/4cf5ce2e85b649568d5f26a6fc08e313, entries=2, sequenceid=6, filesize=5.1 K 2024-12-09T13:31:46,639 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/585b6e74652cbb1b9e8003901efa080f/cf/d0e74845a4a142f6b053735307f01583, entries=48, sequenceid=6, filesize=8.2 K 2024-12-09T13:31:46,640 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 
f0828c080a2e915ab2af5a6e6615ed13 in 64ms, sequenceid=6, compaction requested=false 2024-12-09T13:31:46,640 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 585b6e74652cbb1b9e8003901efa080f in 64ms, sequenceid=6, compaction requested=false 2024-12-09T13:31:46,640 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-09T13:31:46,640 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-09T13:31:46,641 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2603): Flush status journal for 585b6e74652cbb1b9e8003901efa080f: 2024-12-09T13:31:46,641 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2603): Flush status journal for f0828c080a2e915ab2af5a6e6615ed13: 2024-12-09T13:31:46,641 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f. for snaptb0-testExportWithResetTtl completed. 2024-12-09T13:31:46,641 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13. for snaptb0-testExportWithResetTtl completed. 2024-12-09T13:31:46,641 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-09T13:31:46,641 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-09T13:31:46,641 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:31:46,641 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:31:46,641 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/f0828c080a2e915ab2af5a6e6615ed13/cf/4cf5ce2e85b649568d5f26a6fc08e313] hfiles 2024-12-09T13:31:46,641 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/585b6e74652cbb1b9e8003901efa080f/cf/d0e74845a4a142f6b053735307f01583] hfiles 2024-12-09T13:31:46,641 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/f0828c080a2e915ab2af5a6e6615ed13/cf/4cf5ce2e85b649568d5f26a6fc08e313 for snapshot=snaptb0-testExportWithResetTtl 2024-12-09T13:31:46,641 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/585b6e74652cbb1b9e8003901efa080f/cf/d0e74845a4a142f6b053735307f01583 for snapshot=snaptb0-testExportWithResetTtl 2024-12-09T13:31:46,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741968_1144 (size=107) 2024-12-09T13:31:46,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741968_1144 (size=107) 2024-12-09T13:31:46,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741968_1144 (size=107) 2024-12-09T13:31:46,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13. 
2024-12-09T13:31:46,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-09T13:31:46,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=72 2024-12-09T13:31:46,654 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region f0828c080a2e915ab2af5a6e6615ed13 2024-12-09T13:31:46,654 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f0828c080a2e915ab2af5a6e6615ed13 2024-12-09T13:31:46,656 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f0828c080a2e915ab2af5a6e6615ed13 in 232 msec 2024-12-09T13:31:46,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741969_1145 (size=107) 2024-12-09T13:31:46,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741969_1145 (size=107) 2024-12-09T13:31:46,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741969_1145 (size=107) 2024-12-09T13:31:46,661 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f. 
2024-12-09T13:31:46,661 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-12-09T13:31:46,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=73 2024-12-09T13:31:46,662 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 585b6e74652cbb1b9e8003901efa080f 2024-12-09T13:31:46,662 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 585b6e74652cbb1b9e8003901efa080f 2024-12-09T13:31:46,664 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=73, resume processing ppid=71 2024-12-09T13:31:46,664 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 585b6e74652cbb1b9e8003901efa080f in 239 msec 2024-12-09T13:31:46,664 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T13:31:46,665 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T13:31:46,666 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T13:31:46,666 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-12-09T13:31:46,666 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-12-09T13:31:46,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741970_1146 (size=621) 2024-12-09T13:31:46,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741970_1146 (size=621) 2024-12-09T13:31:46,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741970_1146 (size=621) 2024-12-09T13:31:46,676 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T13:31:46,682 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): 
pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T13:31:46,682 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-12-09T13:31:46,684 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T13:31:46,684 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-09T13:31:46,686 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 274 msec 2024-12-09T13:31:46,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-09T13:31:46,733 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-09T13:31:46,735 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T13:31:46,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-12-09T13:31:46,737 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T13:31:46,737 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:31:46,738 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 74 2024-12-09T13:31:46,738 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, 
hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T13:31:46,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-09T13:31:46,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741971_1147 (size=397) 2024-12-09T13:31:46,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741971_1147 (size=397) 2024-12-09T13:31:46,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741971_1147 (size=397) 2024-12-09T13:31:46,750 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 29020d606f6c9b790f14faa68fd8562f, NAME => 'testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:31:46,750 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 989e27290d4b0f91a1c8b9750101135e, NAME => 'testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:31:46,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741972_1148 (size=58) 2024-12-09T13:31:46,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741972_1148 (size=58) 2024-12-09T13:31:46,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741972_1148 (size=58) 2024-12-09T13:31:46,769 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:31:46,769 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 989e27290d4b0f91a1c8b9750101135e, disabling compactions & flushes 2024-12-09T13:31:46,770 INFO 
[RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e. 2024-12-09T13:31:46,770 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e. 2024-12-09T13:31:46,770 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e. after waiting 0 ms 2024-12-09T13:31:46,770 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e. 2024-12-09T13:31:46,770 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e. 2024-12-09T13:31:46,770 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 989e27290d4b0f91a1c8b9750101135e: Waiting for close lock at 1733751106769Disabling compacts and flushes for region at 1733751106769Disabling writes for close at 1733751106770 (+1 ms)Writing region close event to WAL at 1733751106770Closed at 1733751106770 2024-12-09T13:31:46,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741973_1149 (size=58) 2024-12-09T13:31:46,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741973_1149 (size=58) 2024-12-09T13:31:46,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741973_1149 (size=58) 2024-12-09T13:31:46,777 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:31:46,777 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 29020d606f6c9b790f14faa68fd8562f, disabling compactions & flushes 2024-12-09T13:31:46,777 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f. 2024-12-09T13:31:46,777 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f. 2024-12-09T13:31:46,778 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f. after waiting 0 ms 2024-12-09T13:31:46,778 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f. 2024-12-09T13:31:46,778 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f. 
2024-12-09T13:31:46,778 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 29020d606f6c9b790f14faa68fd8562f: Waiting for close lock at 1733751106777Disabling compacts and flushes for region at 1733751106777Disabling writes for close at 1733751106778 (+1 ms)Writing region close event to WAL at 1733751106778Closed at 1733751106778 2024-12-09T13:31:46,778 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T13:31:46,779 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733751106779"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733751106779"}]},"ts":"1733751106779"} 2024-12-09T13:31:46,779 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733751106779"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733751106779"}]},"ts":"1733751106779"} 2024-12-09T13:31:46,781 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-09T13:31:46,782 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T13:31:46,782 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751106782"}]},"ts":"1733751106782"} 2024-12-09T13:31:46,784 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-09T13:31:46,784 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {c48e4b8eb196=0} racks are {/default-rack=0} 2024-12-09T13:31:46,786 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T13:31:46,786 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T13:31:46,786 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T13:31:46,786 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T13:31:46,786 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T13:31:46,786 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T13:31:46,786 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T13:31:46,786 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T13:31:46,786 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T13:31:46,786 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T13:31:46,787 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, 
region=29020d606f6c9b790f14faa68fd8562f, ASSIGN}, {pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=989e27290d4b0f91a1c8b9750101135e, ASSIGN}] 2024-12-09T13:31:46,788 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=989e27290d4b0f91a1c8b9750101135e, ASSIGN 2024-12-09T13:31:46,789 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=29020d606f6c9b790f14faa68fd8562f, ASSIGN 2024-12-09T13:31:46,789 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=989e27290d4b0f91a1c8b9750101135e, ASSIGN; state=OFFLINE, location=c48e4b8eb196,36493,1733750968163; forceNewPlan=false, retain=false 2024-12-09T13:31:46,789 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=29020d606f6c9b790f14faa68fd8562f, ASSIGN; state=OFFLINE, location=c48e4b8eb196,39211,1733750968235; forceNewPlan=false, retain=false 2024-12-09T13:31:46,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-09T13:31:46,940 INFO [c48e4b8eb196:45049 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-09T13:31:46,940 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=989e27290d4b0f91a1c8b9750101135e, regionState=OPENING, regionLocation=c48e4b8eb196,36493,1733750968163 2024-12-09T13:31:46,940 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=29020d606f6c9b790f14faa68fd8562f, regionState=OPENING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:31:46,944 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=989e27290d4b0f91a1c8b9750101135e, ASSIGN because future has completed 2024-12-09T13:31:46,945 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure 989e27290d4b0f91a1c8b9750101135e, server=c48e4b8eb196,36493,1733750968163}] 2024-12-09T13:31:46,946 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=29020d606f6c9b790f14faa68fd8562f, ASSIGN because future has completed 2024-12-09T13:31:46,947 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=78, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure 29020d606f6c9b790f14faa68fd8562f, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:31:47,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-09T13:31:47,103 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e. 2024-12-09T13:31:47,103 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7752): Opening region: {ENCODED => 989e27290d4b0f91a1c8b9750101135e, NAME => 'testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T13:31:47,103 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f. 2024-12-09T13:31:47,103 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7752): Opening region: {ENCODED => 29020d606f6c9b790f14faa68fd8562f, NAME => 'testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T13:31:47,104 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e. service=AccessControlService 2024-12-09T13:31:47,104 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f. 
service=AccessControlService 2024-12-09T13:31:47,104 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T13:31:47,104 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T13:31:47,104 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 989e27290d4b0f91a1c8b9750101135e 2024-12-09T13:31:47,104 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:31:47,104 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 29020d606f6c9b790f14faa68fd8562f 2024-12-09T13:31:47,104 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7794): checking encryption for 989e27290d4b0f91a1c8b9750101135e 2024-12-09T13:31:47,104 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7797): checking classloading for 989e27290d4b0f91a1c8b9750101135e 2024-12-09T13:31:47,104 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:31:47,104 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7794): checking encryption for 29020d606f6c9b790f14faa68fd8562f 2024-12-09T13:31:47,104 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7797): checking classloading for 29020d606f6c9b790f14faa68fd8562f 2024-12-09T13:31:47,106 INFO [StoreOpener-989e27290d4b0f91a1c8b9750101135e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 989e27290d4b0f91a1c8b9750101135e 2024-12-09T13:31:47,106 INFO [StoreOpener-29020d606f6c9b790f14faa68fd8562f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 29020d606f6c9b790f14faa68fd8562f 2024-12-09T13:31:47,108 INFO [StoreOpener-29020d606f6c9b790f14faa68fd8562f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); 
ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 29020d606f6c9b790f14faa68fd8562f columnFamilyName cf 2024-12-09T13:31:47,108 INFO [StoreOpener-989e27290d4b0f91a1c8b9750101135e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 989e27290d4b0f91a1c8b9750101135e columnFamilyName cf 2024-12-09T13:31:47,108 DEBUG [StoreOpener-989e27290d4b0f91a1c8b9750101135e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:31:47,108 DEBUG [StoreOpener-29020d606f6c9b790f14faa68fd8562f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:31:47,109 INFO [StoreOpener-29020d606f6c9b790f14faa68fd8562f-1 {}] regionserver.HStore(327): Store=29020d606f6c9b790f14faa68fd8562f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:31:47,109 INFO [StoreOpener-989e27290d4b0f91a1c8b9750101135e-1 {}] regionserver.HStore(327): Store=989e27290d4b0f91a1c8b9750101135e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:31:47,109 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1038): replaying wal for 29020d606f6c9b790f14faa68fd8562f 2024-12-09T13:31:47,109 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1038): replaying wal for 989e27290d4b0f91a1c8b9750101135e 2024-12-09T13:31:47,110 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/29020d606f6c9b790f14faa68fd8562f 2024-12-09T13:31:47,110 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/989e27290d4b0f91a1c8b9750101135e 2024-12-09T13:31:47,111 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, 
pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/989e27290d4b0f91a1c8b9750101135e 2024-12-09T13:31:47,111 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/29020d606f6c9b790f14faa68fd8562f 2024-12-09T13:31:47,111 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1048): stopping wal replay for 989e27290d4b0f91a1c8b9750101135e 2024-12-09T13:31:47,111 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1048): stopping wal replay for 29020d606f6c9b790f14faa68fd8562f 2024-12-09T13:31:47,112 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1060): Cleaning up temporary data for 29020d606f6c9b790f14faa68fd8562f 2024-12-09T13:31:47,112 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1060): Cleaning up temporary data for 989e27290d4b0f91a1c8b9750101135e 2024-12-09T13:31:47,113 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1093): writing seq id for 989e27290d4b0f91a1c8b9750101135e 2024-12-09T13:31:47,113 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1093): writing seq id for 29020d606f6c9b790f14faa68fd8562f 2024-12-09T13:31:47,115 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/989e27290d4b0f91a1c8b9750101135e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:31:47,116 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/29020d606f6c9b790f14faa68fd8562f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:31:47,116 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1114): Opened 989e27290d4b0f91a1c8b9750101135e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63757099, jitterRate=-0.04994519054889679}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:31:47,116 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1114): Opened 29020d606f6c9b790f14faa68fd8562f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71258267, jitterRate=0.061830922961235046}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:31:47,116 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 989e27290d4b0f91a1c8b9750101135e 
2024-12-09T13:31:47,116 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 29020d606f6c9b790f14faa68fd8562f 2024-12-09T13:31:47,117 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1006): Region open journal for 989e27290d4b0f91a1c8b9750101135e: Running coprocessor pre-open hook at 1733751107104Writing region info on filesystem at 1733751107104Initializing all the Stores at 1733751107105 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751107105Cleaning up temporary data from old regions at 1733751107112 (+7 ms)Running coprocessor post-open hooks at 1733751107116 (+4 ms)Region opened successfully at 1733751107117 (+1 ms) 2024-12-09T13:31:47,117 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1006): Region open journal for 29020d606f6c9b790f14faa68fd8562f: Running coprocessor pre-open hook at 1733751107104Writing region info on filesystem at 1733751107105 (+1 ms)Initializing all the Stores at 1733751107106 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751107106Cleaning up temporary data from old regions at 1733751107112 (+6 ms)Running coprocessor post-open hooks at 1733751107116 (+4 ms)Region opened successfully at 1733751107117 (+1 ms) 2024-12-09T13:31:47,117 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f., pid=78, masterSystemTime=1733751107100 2024-12-09T13:31:47,117 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e., pid=77, masterSystemTime=1733751107099 2024-12-09T13:31:47,119 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e. 2024-12-09T13:31:47,119 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e. 
2024-12-09T13:31:47,120 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=989e27290d4b0f91a1c8b9750101135e, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,36493,1733750968163 2024-12-09T13:31:47,120 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f. 2024-12-09T13:31:47,120 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f. 2024-12-09T13:31:47,121 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=29020d606f6c9b790f14faa68fd8562f, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:31:47,122 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure 989e27290d4b0f91a1c8b9750101135e, server=c48e4b8eb196,36493,1733750968163 because future has completed 2024-12-09T13:31:47,123 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure 29020d606f6c9b790f14faa68fd8562f, server=c48e4b8eb196,39211,1733750968235 because future has completed 2024-12-09T13:31:47,124 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=77, resume processing ppid=76 2024-12-09T13:31:47,125 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=76, state=SUCCESS, hasLock=false; OpenRegionProcedure 989e27290d4b0f91a1c8b9750101135e, server=c48e4b8eb196,36493,1733750968163 in 179 msec 2024-12-09T13:31:47,125 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=78, resume processing ppid=75 2024-12-09T13:31:47,125 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=75, state=SUCCESS, hasLock=false; OpenRegionProcedure 29020d606f6c9b790f14faa68fd8562f, server=c48e4b8eb196,39211,1733750968235 in 176 msec 2024-12-09T13:31:47,126 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=989e27290d4b0f91a1c8b9750101135e, ASSIGN in 339 msec 2024-12-09T13:31:47,127 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=75, resume processing ppid=74 2024-12-09T13:31:47,127 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=29020d606f6c9b790f14faa68fd8562f, ASSIGN in 339 msec 2024-12-09T13:31:47,128 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T13:31:47,128 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751107128"}]},"ts":"1733751107128"} 2024-12-09T13:31:47,130 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated 
tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-09T13:31:47,130 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T13:31:47,131 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-12-09T13:31:47,133 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-09T13:31:47,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:31:47,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:31:47,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:31:47,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:31:47,232 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T13:31:47,232 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T13:31:47,233 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T13:31:47,233 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T13:31:47,233 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T13:31:47,233 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T13:31:47,233 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data 
PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T13:31:47,233 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T13:31:47,234 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 495 msec 2024-12-09T13:31:47,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-09T13:31:47,364 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 2024-12-09T13:31:47,364 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-09T13:31:47,365 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:31:47,372 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-09T13:31:47,373 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:31:47,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportWithResetTtl assigned. 2024-12-09T13:31:47,373 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T13:31:47,381 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='00b7e371b38f32b3427027c52f5100f93', locateType=CURRENT is [region=testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:31:47,382 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='14a17e02194a93910a6dd927f5b07045b', locateType=CURRENT is [region=testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e., hostname=c48e4b8eb196,36493,1733750968163, seqNum=2] 2024-12-09T13:31:47,383 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='3c6d1a6a4a95b82ecedafa738a42ab565', locateType=CURRENT is [region=testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e., hostname=c48e4b8eb196,36493,1733750968163, seqNum=2] 2024-12-09T13:31:47,384 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='2fc1abb33c3fae1a3c89a96aee08a3ce0', locateType=CURRENT is [region=testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e., hostname=c48e4b8eb196,36493,1733750968163, seqNum=2] 2024-12-09T13:31:47,384 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='45ee7e79d9a867776fd7dc344ac660af3', locateType=CURRENT is 
[region=testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e., hostname=c48e4b8eb196,36493,1733750968163, seqNum=2] 2024-12-09T13:31:47,386 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39211 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T13:31:47,388 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36493 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T13:31:47,389 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T13:31:47,392 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-12-09T13:31:47,392 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f. 2024-12-09T13:31:47,392 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:31:47,394 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T13:31:47,399 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T13:31:47,405 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-09T13:31:47,408 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-09T13:31:47,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733751107408 (current time:1733751107408). 
2024-12-09T13:31:47,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-09T13:31:47,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T13:31:47,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43c238ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:47,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:31:47,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:31:47,410 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:31:47,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:31:47,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:31:47,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c0a4082, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:47,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:31:47,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:31:47,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:47,411 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33960, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:31:47,412 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@528050e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:47,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:31:47,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:31:47,413 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:31:47,414 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52796, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:31:47,415 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049. 2024-12-09T13:31:47,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:31:47,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:47,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:47,416 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T13:31:47,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3738bc41, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:47,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:31:47,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:31:47,417 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:31:47,417 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:31:47,417 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:31:47,418 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ed6e7ca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:47,418 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:31:47,418 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:31:47,418 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:47,418 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33976, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:31:47,419 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36017516, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:31:47,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:31:47,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:31:47,422 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:31:47,423 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52800, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-09T13:31:47,425 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:31:47,426 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049. 2024-12-09T13:31:47,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:31:47,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:47,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:31:47,427 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:31:47,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-09T13:31:47,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T13:31:47,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-09T13:31:47,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-09T13:31:47,429 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-12-09T13:31:47,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-09T13:31:47,430 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T13:31:47,432 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T13:31:47,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741974_1150 (size=143) 2024-12-09T13:31:47,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741974_1150 (size=143) 2024-12-09T13:31:47,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741974_1150 (size=143) 2024-12-09T13:31:47,440 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T13:31:47,441 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 29020d606f6c9b790f14faa68fd8562f}, {pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 989e27290d4b0f91a1c8b9750101135e}] 2024-12-09T13:31:47,442 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 29020d606f6c9b790f14faa68fd8562f 2024-12-09T13:31:47,442 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 989e27290d4b0f91a1c8b9750101135e 2024-12-09T13:31:47,484 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-09T13:31:47,484 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-09T13:31:47,485 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-09T13:31:47,485 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-09T13:31:47,485 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-09T13:31:47,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-09T13:31:47,594 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36493 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=81 2024-12-09T13:31:47,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e. 2024-12-09T13:31:47,595 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2902): Flushing 989e27290d4b0f91a1c8b9750101135e 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-09T13:31:47,595 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39211 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=80 2024-12-09T13:31:47,595 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f. 
2024-12-09T13:31:47,595 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2902): Flushing 29020d606f6c9b790f14faa68fd8562f 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-09T13:31:47,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/29020d606f6c9b790f14faa68fd8562f/.tmp/cf/7efec52958c34961a929a6542fa17d3f is 71, key is 01948c08f468a1a164053eb7875f8d1d/cf:q/1733751107386/Put/seqid=0 2024-12-09T13:31:47,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741975_1151 (size=5424) 2024-12-09T13:31:47,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741975_1151 (size=5424) 2024-12-09T13:31:47,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741975_1151 (size=5424) 2024-12-09T13:31:47,621 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/29020d606f6c9b790f14faa68fd8562f/.tmp/cf/7efec52958c34961a929a6542fa17d3f 2024-12-09T13:31:47,627 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/989e27290d4b0f91a1c8b9750101135e/.tmp/cf/de6f727e43364f5f8af9f5939c3acafb is 71, key is 15bfd3422ed2255aefbc6072230a668c/cf:q/1733751107388/Put/seqid=0 2024-12-09T13:31:47,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/29020d606f6c9b790f14faa68fd8562f/.tmp/cf/7efec52958c34961a929a6542fa17d3f as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/29020d606f6c9b790f14faa68fd8562f/cf/7efec52958c34961a929a6542fa17d3f 2024-12-09T13:31:47,637 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/29020d606f6c9b790f14faa68fd8562f/cf/7efec52958c34961a929a6542fa17d3f, entries=5, sequenceid=5, filesize=5.3 K 2024-12-09T13:31:47,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741976_1152 (size=8190) 2024-12-09T13:31:47,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741976_1152 (size=8190) 2024-12-09T13:31:47,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is 
added to blk_1073741976_1152 (size=8190) 2024-12-09T13:31:47,643 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/989e27290d4b0f91a1c8b9750101135e/.tmp/cf/de6f727e43364f5f8af9f5939c3acafb 2024-12-09T13:31:47,649 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 29020d606f6c9b790f14faa68fd8562f in 54ms, sequenceid=5, compaction requested=false 2024-12-09T13:31:47,649 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-12-09T13:31:47,650 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2603): Flush status journal for 29020d606f6c9b790f14faa68fd8562f: 2024-12-09T13:31:47,650 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f. for snaptb-testExportWithResetTtl completed. 2024-12-09T13:31:47,650 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-09T13:31:47,650 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:31:47,650 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/29020d606f6c9b790f14faa68fd8562f/cf/7efec52958c34961a929a6542fa17d3f] hfiles 2024-12-09T13:31:47,650 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/29020d606f6c9b790f14faa68fd8562f/cf/7efec52958c34961a929a6542fa17d3f for snapshot=snaptb-testExportWithResetTtl 2024-12-09T13:31:47,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/989e27290d4b0f91a1c8b9750101135e/.tmp/cf/de6f727e43364f5f8af9f5939c3acafb as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/989e27290d4b0f91a1c8b9750101135e/cf/de6f727e43364f5f8af9f5939c3acafb 2024-12-09T13:31:47,657 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/989e27290d4b0f91a1c8b9750101135e/cf/de6f727e43364f5f8af9f5939c3acafb, entries=45, sequenceid=5, filesize=8.0 K 2024-12-09T13:31:47,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741977_1153 (size=100) 2024-12-09T13:31:47,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741977_1153 (size=100) 2024-12-09T13:31:47,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741977_1153 (size=100) 2024-12-09T13:31:47,658 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 989e27290d4b0f91a1c8b9750101135e in 63ms, sequenceid=5, compaction requested=false 2024-12-09T13:31:47,658 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2603): Flush status journal for 989e27290d4b0f91a1c8b9750101135e: 2024-12-09T13:31:47,658 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e. for snaptb-testExportWithResetTtl completed. 
2024-12-09T13:31:47,658 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-09T13:31:47,658 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:31:47,658 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f. 2024-12-09T13:31:47,659 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/989e27290d4b0f91a1c8b9750101135e/cf/de6f727e43364f5f8af9f5939c3acafb] hfiles 2024-12-09T13:31:47,659 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-09T13:31:47,659 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/989e27290d4b0f91a1c8b9750101135e/cf/de6f727e43364f5f8af9f5939c3acafb for snapshot=snaptb-testExportWithResetTtl 2024-12-09T13:31:47,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=80 2024-12-09T13:31:47,659 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 29020d606f6c9b790f14faa68fd8562f 2024-12-09T13:31:47,659 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 29020d606f6c9b790f14faa68fd8562f 2024-12-09T13:31:47,661 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 29020d606f6c9b790f14faa68fd8562f in 220 msec 2024-12-09T13:31:47,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741978_1154 (size=100) 2024-12-09T13:31:47,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741978_1154 (size=100) 2024-12-09T13:31:47,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741978_1154 (size=100) 2024-12-09T13:31:47,671 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e. 
2024-12-09T13:31:47,672 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=81 2024-12-09T13:31:47,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=81 2024-12-09T13:31:47,672 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 989e27290d4b0f91a1c8b9750101135e 2024-12-09T13:31:47,672 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 989e27290d4b0f91a1c8b9750101135e 2024-12-09T13:31:47,674 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=81, resume processing ppid=79 2024-12-09T13:31:47,674 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T13:31:47,674 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 989e27290d4b0f91a1c8b9750101135e in 233 msec 2024-12-09T13:31:47,675 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T13:31:47,675 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T13:31:47,675 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-12-09T13:31:47,676 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-09T13:31:47,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741979_1155 (size=600) 2024-12-09T13:31:47,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741979_1155 (size=600) 2024-12-09T13:31:47,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741979_1155 (size=600) 2024-12-09T13:31:47,689 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T13:31:47,696 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, 
state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T13:31:47,697 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-09T13:31:47,698 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T13:31:47,698 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-09T13:31:47,699 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 271 msec 2024-12-09T13:31:47,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-09T13:31:47,743 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-12-09T13:31:47,754 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751107754 2024-12-09T13:31:47,754 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:45819, tgtDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751107754, rawTgtDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751107754, srcFsUri=hdfs://localhost:45819, srcDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:31:47,786 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:45819, inputRoot=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:31:47,786 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751107754, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751107754/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-09T13:31:47,789 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
2024-12-09T13:31:47,797 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751107754/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-09T13:31:47,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741980_1156 (size=143) 2024-12-09T13:31:47,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741981_1157 (size=600) 2024-12-09T13:31:47,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741981_1157 (size=600) 2024-12-09T13:31:47,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741980_1156 (size=143) 2024-12-09T13:31:47,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741981_1157 (size=600) 2024-12-09T13:31:47,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741980_1156 (size=143) 2024-12-09T13:31:47,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741982_1158 (size=141) 2024-12-09T13:31:47,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741982_1158 (size=141) 2024-12-09T13:31:47,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741982_1158 (size=141) 2024-12-09T13:31:47,824 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:31:47,824 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:31:47,825 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:31:48,688 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop-4684624410344382427.jar 2024-12-09T13:31:48,688 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:31:48,688 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:31:48,742 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop-15648920028574336038.jar 2024-12-09T13:31:48,742 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:31:48,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:31:48,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:31:48,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:31:48,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:31:48,744 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:31:48,744 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-09T13:31:48,744 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-09T13:31:48,744 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-09T13:31:48,745 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-09T13:31:48,745 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-09T13:31:48,745 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-09T13:31:48,745 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-09T13:31:48,745 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-09T13:31:48,745 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-09T13:31:48,746 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-09T13:31:48,746 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-09T13:31:48,746 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:31:48,746 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:31:48,746 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:31:48,747 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:31:48,747 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:31:48,747 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:31:48,747 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:31:48,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741983_1159 (size=5175431) 2024-12-09T13:31:48,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741983_1159 (size=5175431) 2024-12-09T13:31:48,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741983_1159 (size=5175431) 2024-12-09T13:31:48,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741984_1160 (size=6425017) 2024-12-09T13:31:48,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741984_1160 (size=6425017) 2024-12-09T13:31:48,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741984_1160 (size=6425017) 2024-12-09T13:31:48,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741985_1161 (size=1877034) 2024-12-09T13:31:48,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741985_1161 (size=1877034) 2024-12-09T13:31:48,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741985_1161 (size=1877034) 2024-12-09T13:31:48,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741986_1162 (size=20406) 2024-12-09T13:31:48,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741986_1162 (size=20406) 2024-12-09T13:31:48,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741986_1162 (size=20406) 2024-12-09T13:31:48,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741987_1163 (size=24096) 2024-12-09T13:31:48,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741987_1163 (size=24096) 2024-12-09T13:31:48,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741987_1163 (size=24096) 2024-12-09T13:31:48,882 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741988_1164 (size=503880) 2024-12-09T13:31:48,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741988_1164 (size=503880) 2024-12-09T13:31:48,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741988_1164 (size=503880) 2024-12-09T13:31:48,897 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0002_000001 (auth:SIMPLE) from 127.0.0.1:36058 2024-12-09T13:31:48,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741989_1165 (size=4695811) 2024-12-09T13:31:48,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741989_1165 (size=4695811) 2024-12-09T13:31:48,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741989_1165 (size=4695811) 2024-12-09T13:31:48,922 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_2/usercache/jenkins/appcache/application_1733750974572_0002/container_1733750974572_0002_01_000001/launch_container.sh] 2024-12-09T13:31:48,923 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_2/usercache/jenkins/appcache/application_1733750974572_0002/container_1733750974572_0002_01_000001/container_tokens] 2024-12-09T13:31:48,923 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_2/usercache/jenkins/appcache/application_1733750974572_0002/container_1733750974572_0002_01_000001/sysfs] 2024-12-09T13:31:48,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741990_1166 (size=443172) 2024-12-09T13:31:48,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741990_1166 (size=443172) 2024-12-09T13:31:48,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741990_1166 (size=443172) 2024-12-09T13:31:48,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741991_1167 (size=111872) 2024-12-09T13:31:48,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741991_1167 (size=111872) 2024-12-09T13:31:48,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741991_1167 
(size=111872) 2024-12-09T13:31:48,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741992_1168 (size=1597213) 2024-12-09T13:31:48,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741992_1168 (size=1597213) 2024-12-09T13:31:48,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741992_1168 (size=1597213) 2024-12-09T13:31:48,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741993_1169 (size=4188619) 2024-12-09T13:31:48,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741993_1169 (size=4188619) 2024-12-09T13:31:48,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741993_1169 (size=4188619) 2024-12-09T13:31:48,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741994_1170 (size=127628) 2024-12-09T13:31:48,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741994_1170 (size=127628) 2024-12-09T13:31:48,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741994_1170 (size=127628) 2024-12-09T13:31:48,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741995_1171 (size=29229) 2024-12-09T13:31:48,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741995_1171 (size=29229) 2024-12-09T13:31:48,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741995_1171 (size=29229) 2024-12-09T13:31:48,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741996_1172 (size=45609) 2024-12-09T13:31:48,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741996_1172 (size=45609) 2024-12-09T13:31:48,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741996_1172 (size=45609) 2024-12-09T13:31:49,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741997_1173 (size=8360360) 2024-12-09T13:31:49,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741997_1173 (size=8360360) 2024-12-09T13:31:49,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741997_1173 (size=8360360) 2024-12-09T13:31:49,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741998_1174 (size=1323991) 2024-12-09T13:31:49,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to 
blk_1073741998_1174 (size=1323991) 2024-12-09T13:31:49,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741998_1174 (size=1323991) 2024-12-09T13:31:49,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741999_1175 (size=232957) 2024-12-09T13:31:49,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741999_1175 (size=232957) 2024-12-09T13:31:49,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741999_1175 (size=232957) 2024-12-09T13:31:49,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742000_1176 (size=217634) 2024-12-09T13:31:49,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742000_1176 (size=217634) 2024-12-09T13:31:49,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742000_1176 (size=217634) 2024-12-09T13:31:49,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742001_1177 (size=903930) 2024-12-09T13:31:49,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742001_1177 (size=903930) 2024-12-09T13:31:49,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742001_1177 (size=903930) 2024-12-09T13:31:49,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742002_1178 (size=77835) 2024-12-09T13:31:49,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742002_1178 (size=77835) 2024-12-09T13:31:49,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742002_1178 (size=77835) 2024-12-09T13:31:49,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742003_1179 (size=1832290) 2024-12-09T13:31:49,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742003_1179 (size=1832290) 2024-12-09T13:31:49,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742003_1179 (size=1832290) 2024-12-09T13:31:49,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742004_1180 (size=30949) 2024-12-09T13:31:49,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742004_1180 (size=30949) 2024-12-09T13:31:49,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742004_1180 (size=30949) 2024-12-09T13:31:49,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is 
added to blk_1073742005_1181 (size=136454) 2024-12-09T13:31:49,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742005_1181 (size=136454) 2024-12-09T13:31:49,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742005_1181 (size=136454) 2024-12-09T13:31:49,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742006_1182 (size=322274) 2024-12-09T13:31:49,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742006_1182 (size=322274) 2024-12-09T13:31:49,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742006_1182 (size=322274) 2024-12-09T13:31:49,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742007_1183 (size=131440) 2024-12-09T13:31:49,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742007_1183 (size=131440) 2024-12-09T13:31:49,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742007_1183 (size=131440) 2024-12-09T13:31:49,104 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-09T13:31:49,106 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-12-09T13:31:49,108 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.0 K 2024-12-09T13:31:49,108 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.3 K 2024-12-09T13:31:49,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742008_1184 (size=427) 2024-12-09T13:31:49,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742008_1184 (size=427) 2024-12-09T13:31:49,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742008_1184 (size=427) 2024-12-09T13:31:49,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742009_1185 (size=21) 2024-12-09T13:31:49,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742009_1185 (size=21) 2024-12-09T13:31:49,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742009_1185 (size=21) 2024-12-09T13:31:49,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742010_1186 (size=304098) 2024-12-09T13:31:49,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742010_1186 (size=304098) 2024-12-09T13:31:49,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to 
blk_1073742010_1186 (size=304098) 2024-12-09T13:31:49,148 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T13:31:49,148 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T13:31:49,823 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0003_000001 (auth:SIMPLE) from 127.0.0.1:39312 2024-12-09T13:31:50,516 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T13:31:55,352 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0003_000001 (auth:SIMPLE) from 127.0.0.1:35046 2024-12-09T13:31:55,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742011_1187 (size=349796) 2024-12-09T13:31:55,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742011_1187 (size=349796) 2024-12-09T13:31:55,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742011_1187 (size=349796) 2024-12-09T13:31:56,020 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
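(Illustrative sketch, not part of the captured log.) The long run of "For class X, using jar Y" DEBUG entries earlier in this stretch is TableMapReduceUtil resolving every jar that has to ship with the MapReduce job before the export mappers can run. A minimal job-side setup that produces the same resolution, assuming a plain Job instance is being prepared (the job name and the printed key are illustrative):

    // Sketch only: shows the dependency-jar resolution behind the DEBUG lines above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-snapshot-sketch");
        // Adds the HBase, ZooKeeper, shaded-protobuf, metrics and OpenTelemetry jars to
        // the job's distributed-cache jar list, class by class, as logged above.
        TableMapReduceUtil.addDependencyJars(job);
        System.out.println(job.getConfiguration().get("tmpjars"));
      }
    }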
2024-12-09T13:31:57,591 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0003_000001 (auth:SIMPLE) from 127.0.0.1:41838 2024-12-09T13:31:57,593 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0003_000001 (auth:SIMPLE) from 127.0.0.1:43950 2024-12-09T13:32:03,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742012_1188 (size=8190) 2024-12-09T13:32:03,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742012_1188 (size=8190) 2024-12-09T13:32:03,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742012_1188 (size=8190) 2024-12-09T13:32:04,180 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_3/usercache/jenkins/appcache/application_1733750974572_0003/container_1733750974572_0003_01_000002/launch_container.sh] 2024-12-09T13:32:04,181 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_3/usercache/jenkins/appcache/application_1733750974572_0003/container_1733750974572_0003_01_000002/container_tokens] 2024-12-09T13:32:04,181 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_3/usercache/jenkins/appcache/application_1733750974572_0003/container_1733750974572_0003_01_000002/sysfs] 2024-12-09T13:32:05,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742014_1190 (size=5424) 2024-12-09T13:32:05,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742014_1190 (size=5424) 2024-12-09T13:32:05,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742014_1190 (size=5424) 2024-12-09T13:32:06,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742013_1189 (size=22107) 2024-12-09T13:32:06,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742013_1189 (size=22107) 2024-12-09T13:32:06,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742013_1189 (size=22107) 2024-12-09T13:32:06,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742015_1191 (size=462) 2024-12-09T13:32:06,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to 
blk_1073742015_1191 (size=462) 2024-12-09T13:32:06,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742015_1191 (size=462) 2024-12-09T13:32:06,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742016_1192 (size=22107) 2024-12-09T13:32:06,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742016_1192 (size=22107) 2024-12-09T13:32:06,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742016_1192 (size=22107) 2024-12-09T13:32:06,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742017_1193 (size=349796) 2024-12-09T13:32:06,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742017_1193 (size=349796) 2024-12-09T13:32:06,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742017_1193 (size=349796) 2024-12-09T13:32:06,102 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0003_000001 (auth:SIMPLE) from 127.0.0.1:47674 2024-12-09T13:32:06,113 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733750974572_0003_01_000003 is : 143 2024-12-09T13:32:06,122 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_2/usercache/jenkins/appcache/application_1733750974572_0003/container_1733750974572_0003_01_000003/launch_container.sh] 2024-12-09T13:32:06,122 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_2/usercache/jenkins/appcache/application_1733750974572_0003/container_1733750974572_0003_01_000003/container_tokens] 2024-12-09T13:32:06,122 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_2/usercache/jenkins/appcache/application_1733750974572_0003/container_1733750974572_0003_01_000003/sysfs] 2024-12-09T13:32:07,337 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-09T13:32:07,339 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
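(Illustrative sketch, not part of the captured log.) Once the export is finalized, the test walks both the source and the exported snapshot directories and expects to find .snapshotinfo and data.manifest, as the TestExportSnapshot(495/500) entries below record. A minimal version of the same walk, assuming direct FileSystem access to the mini-cluster namenode; the paths come from the log, the rest is an assumption:

    // Sketch only: lists the exported snapshot directory checked by the test below.
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    public class ListExportedSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:45819"), conf);
        Path exported = new Path(
            "hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/"
            + "export-test/export-1733751107754/.hbase-snapshot/snaptb-testExportWithResetTtl");
        // Expect .snapshotinfo and data.manifest, matching the entries logged below.
        RemoteIterator<LocatedFileStatus> it = fs.listFiles(exported, true);
        while (it.hasNext()) {
          System.out.println(it.next().getPath());
        }
      }
    }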
2024-12-09T13:32:07,345 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb-testExportWithResetTtl 2024-12-09T13:32:07,345 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-09T13:32:07,345 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-09T13:32:07,345 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-09T13:32:07,346 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-09T13:32:07,346 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-09T13:32:07,346 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751107754/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751107754/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-09T13:32:07,346 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751107754/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-09T13:32:07,346 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751107754/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-09T13:32:07,353 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-12-09T13:32:07,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=82, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl 2024-12-09T13:32:07,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-09T13:32:07,356 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751127356"}]},"ts":"1733751127356"} 2024-12-09T13:32:07,358 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-09T13:32:07,358 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-12-09T13:32:07,358 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=83, 
ppid=82, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-12-09T13:32:07,360 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=29020d606f6c9b790f14faa68fd8562f, UNASSIGN}, {pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=989e27290d4b0f91a1c8b9750101135e, UNASSIGN}] 2024-12-09T13:32:07,360 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=989e27290d4b0f91a1c8b9750101135e, UNASSIGN 2024-12-09T13:32:07,360 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=29020d606f6c9b790f14faa68fd8562f, UNASSIGN 2024-12-09T13:32:07,361 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=989e27290d4b0f91a1c8b9750101135e, regionState=CLOSING, regionLocation=c48e4b8eb196,36493,1733750968163 2024-12-09T13:32:07,361 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=29020d606f6c9b790f14faa68fd8562f, regionState=CLOSING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:32:07,362 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=989e27290d4b0f91a1c8b9750101135e, UNASSIGN because future has completed 2024-12-09T13:32:07,363 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:32:07,363 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure 989e27290d4b0f91a1c8b9750101135e, server=c48e4b8eb196,36493,1733750968163}] 2024-12-09T13:32:07,363 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=29020d606f6c9b790f14faa68fd8562f, UNASSIGN because future has completed 2024-12-09T13:32:07,364 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:32:07,364 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=87, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure 29020d606f6c9b790f14faa68fd8562f, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:32:07,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-09T13:32:07,515 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(122): Close 989e27290d4b0f91a1c8b9750101135e 2024-12-09T13:32:07,515 DEBUG 
[RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:32:07,515 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1722): Closing 989e27290d4b0f91a1c8b9750101135e, disabling compactions & flushes 2024-12-09T13:32:07,515 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e. 2024-12-09T13:32:07,515 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e. 2024-12-09T13:32:07,515 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e. after waiting 0 ms 2024-12-09T13:32:07,516 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e. 2024-12-09T13:32:07,516 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(122): Close 29020d606f6c9b790f14faa68fd8562f 2024-12-09T13:32:07,516 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:32:07,516 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1722): Closing 29020d606f6c9b790f14faa68fd8562f, disabling compactions & flushes 2024-12-09T13:32:07,516 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f. 2024-12-09T13:32:07,516 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f. 2024-12-09T13:32:07,516 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f. after waiting 0 ms 2024-12-09T13:32:07,516 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f. 
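(Illustrative sketch, not part of the captured log.) The region closes above are the region-server side of the DisableTableProcedure (pid=82), and the DeleteTableProcedure (pid=88) that archives the HFiles and clears hbase:meta follows further down. The client-side calls that trigger both, assuming a standard Connection to this cluster, are just:

    // Sketch only: client side of the disable/delete procedures logged around this point.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableAndDeleteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testExportWithResetTtl");
          // Master runs DisableTableProcedure (pid=82 above): regions are unassigned and closed.
          admin.disableTable(table);
          // Master runs DeleteTableProcedure (pid=88 below): HFiles are archived, meta rows removed.
          admin.deleteTable(table);
        }
      }
    }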
2024-12-09T13:32:07,519 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/989e27290d4b0f91a1c8b9750101135e/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-09T13:32:07,519 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/29020d606f6c9b790f14faa68fd8562f/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-09T13:32:07,520 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:32:07,520 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:32:07,520 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f. 2024-12-09T13:32:07,520 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e. 2024-12-09T13:32:07,520 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1676): Region close journal for 29020d606f6c9b790f14faa68fd8562f: Waiting for close lock at 1733751127516Running coprocessor pre-close hooks at 1733751127516Disabling compacts and flushes for region at 1733751127516Disabling writes for close at 1733751127516Writing region close event to WAL at 1733751127517 (+1 ms)Running coprocessor post-close hooks at 1733751127520 (+3 ms)Closed at 1733751127520 2024-12-09T13:32:07,520 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1676): Region close journal for 989e27290d4b0f91a1c8b9750101135e: Waiting for close lock at 1733751127515Running coprocessor pre-close hooks at 1733751127515Disabling compacts and flushes for region at 1733751127515Disabling writes for close at 1733751127516 (+1 ms)Writing region close event to WAL at 1733751127516Running coprocessor post-close hooks at 1733751127520 (+4 ms)Closed at 1733751127520 2024-12-09T13:32:07,522 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(157): Closed 29020d606f6c9b790f14faa68fd8562f 2024-12-09T13:32:07,523 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=29020d606f6c9b790f14faa68fd8562f, regionState=CLOSED 2024-12-09T13:32:07,523 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(157): Closed 989e27290d4b0f91a1c8b9750101135e 2024-12-09T13:32:07,523 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=989e27290d4b0f91a1c8b9750101135e, regionState=CLOSED 2024-12-09T13:32:07,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=87, 
ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure 29020d606f6c9b790f14faa68fd8562f, server=c48e4b8eb196,39211,1733750968235 because future has completed 2024-12-09T13:32:07,525 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=86, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure 989e27290d4b0f91a1c8b9750101135e, server=c48e4b8eb196,36493,1733750968163 because future has completed 2024-12-09T13:32:07,527 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=87, resume processing ppid=84 2024-12-09T13:32:07,527 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, ppid=84, state=SUCCESS, hasLock=false; CloseRegionProcedure 29020d606f6c9b790f14faa68fd8562f, server=c48e4b8eb196,39211,1733750968235 in 161 msec 2024-12-09T13:32:07,528 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=29020d606f6c9b790f14faa68fd8562f, UNASSIGN in 168 msec 2024-12-09T13:32:07,528 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=86, resume processing ppid=85 2024-12-09T13:32:07,528 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=85, state=SUCCESS, hasLock=false; CloseRegionProcedure 989e27290d4b0f91a1c8b9750101135e, server=c48e4b8eb196,36493,1733750968163 in 163 msec 2024-12-09T13:32:07,529 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=85, resume processing ppid=83 2024-12-09T13:32:07,529 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=989e27290d4b0f91a1c8b9750101135e, UNASSIGN in 169 msec 2024-12-09T13:32:07,531 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=83, resume processing ppid=82 2024-12-09T13:32:07,531 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=82, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 172 msec 2024-12-09T13:32:07,532 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751127532"}]},"ts":"1733751127532"} 2024-12-09T13:32:07,533 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-09T13:32:07,533 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-12-09T13:32:07,535 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=82, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 181 msec 2024-12-09T13:32:07,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-09T13:32:07,673 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-12-09T13:32:07,674 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-12-09T13:32:07,676 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-12-09T13:32:07,678 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-09T13:32:07,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-12-09T13:32:07,679 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=88, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-09T13:32:07,685 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-12-09T13:32:07,685 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/29020d606f6c9b790f14faa68fd8562f 2024-12-09T13:32:07,685 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/989e27290d4b0f91a1c8b9750101135e 2024-12-09T13:32:07,687 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/989e27290d4b0f91a1c8b9750101135e/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/989e27290d4b0f91a1c8b9750101135e/recovered.edits] 2024-12-09T13:32:07,688 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/29020d606f6c9b790f14faa68fd8562f/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/29020d606f6c9b790f14faa68fd8562f/recovered.edits] 2024-12-09T13:32:07,691 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/989e27290d4b0f91a1c8b9750101135e/cf/de6f727e43364f5f8af9f5939c3acafb to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testExportWithResetTtl/989e27290d4b0f91a1c8b9750101135e/cf/de6f727e43364f5f8af9f5939c3acafb 2024-12-09T13:32:07,692 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/29020d606f6c9b790f14faa68fd8562f/cf/7efec52958c34961a929a6542fa17d3f to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testExportWithResetTtl/29020d606f6c9b790f14faa68fd8562f/cf/7efec52958c34961a929a6542fa17d3f 2024-12-09T13:32:07,694 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/989e27290d4b0f91a1c8b9750101135e/recovered.edits/8.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testExportWithResetTtl/989e27290d4b0f91a1c8b9750101135e/recovered.edits/8.seqid 2024-12-09T13:32:07,694 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/29020d606f6c9b790f14faa68fd8562f/recovered.edits/8.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testExportWithResetTtl/29020d606f6c9b790f14faa68fd8562f/recovered.edits/8.seqid 2024-12-09T13:32:07,695 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/989e27290d4b0f91a1c8b9750101135e 2024-12-09T13:32:07,695 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportWithResetTtl/29020d606f6c9b790f14faa68fd8562f 2024-12-09T13:32:07,695 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-12-09T13:32:07,697 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=88, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-09T13:32:07,699 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-12-09T13:32:07,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-09T13:32:07,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-09T13:32:07,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-09T13:32:07,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-09T13:32:07,785 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-09T13:32:07,785 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-09T13:32:07,785 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-09T13:32:07,785 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testExportWithResetTtl with data PBUF 2024-12-09T13:32:07,787 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor. 2024-12-09T13:32:07,789 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=88, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-09T13:32:07,789 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states. 2024-12-09T13:32:07,789 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751127789"}]},"ts":"9223372036854775807"} 2024-12-09T13:32:07,789 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751127789"}]},"ts":"9223372036854775807"} 2024-12-09T13:32:07,792 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-09T13:32:07,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-09T13:32:07,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-09T13:32:07,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-09T13:32:07,792 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 29020d606f6c9b790f14faa68fd8562f, NAME => 'testExportWithResetTtl,,1733751106734.29020d606f6c9b790f14faa68fd8562f.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 989e27290d4b0f91a1c8b9750101135e, NAME => 'testExportWithResetTtl,1,1733751106734.989e27290d4b0f91a1c8b9750101135e.', STARTKEY => '1', ENDKEY => ''}] 2024-12-09T13:32:07,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:32:07,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:32:07,792 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted. 
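
The disable/delete sequence recorded above (DisableTableProcedure pid=82 followed by DeleteTableProcedure pid=88, with HFileArchiver moving the store files under archive/) is what the master runs when a client asks for the table to be dropped. As a minimal illustrative sketch only, assuming an HBase 2.x client and a reachable cluster (the test harness itself may go through its own utility wrappers), the client side of that teardown looks roughly like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testExportWithResetTtl");
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);   // shows up as DisableTableProcedure in this log
        }
        admin.deleteTable(table);      // shows up as DeleteTableProcedure + HFileArchiver moves
      }
    }
  }
}

The snapshot cleanup that appears further down in this log ("delete name: ... type: DISABLED") corresponds to Admin.deleteSnapshot(String) calls made through the same kind of connection.
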
2024-12-09T13:32:07,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-09T13:32:07,792 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733751127792"}]},"ts":"9223372036854775807"} 2024-12-09T13:32:07,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:32:07,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:32:07,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-12-09T13:32:07,794 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T13:32:07,795 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T13:32:07,795 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T13:32:07,795 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-12-09T13:32:07,795 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-09T13:32:07,796 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=88, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-09T13:32:07,797 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 122 msec 2024-12-09T13:32:07,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-12-09T13:32:07,905 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-12-09T13:32:07,905 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-12-09T13:32:07,906 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 
disable testtb-testExportWithResetTtl 2024-12-09T13:32:07,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-12-09T13:32:07,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-09T13:32:07,911 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751127911"}]},"ts":"1733751127911"} 2024-12-09T13:32:07,913 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-09T13:32:07,913 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-12-09T13:32:07,914 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-12-09T13:32:07,916 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=f0828c080a2e915ab2af5a6e6615ed13, UNASSIGN}, {pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=585b6e74652cbb1b9e8003901efa080f, UNASSIGN}] 2024-12-09T13:32:07,917 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=585b6e74652cbb1b9e8003901efa080f, UNASSIGN 2024-12-09T13:32:07,917 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=f0828c080a2e915ab2af5a6e6615ed13, UNASSIGN 2024-12-09T13:32:07,918 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=585b6e74652cbb1b9e8003901efa080f, regionState=CLOSING, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:32:07,918 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=f0828c080a2e915ab2af5a6e6615ed13, regionState=CLOSING, regionLocation=c48e4b8eb196,36493,1733750968163 2024-12-09T13:32:07,919 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=585b6e74652cbb1b9e8003901efa080f, UNASSIGN because future has completed 2024-12-09T13:32:07,920 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:32:07,920 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure 585b6e74652cbb1b9e8003901efa080f, server=c48e4b8eb196,44683,1733750968041}] 
2024-12-09T13:32:07,920 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=f0828c080a2e915ab2af5a6e6615ed13, UNASSIGN because future has completed 2024-12-09T13:32:07,921 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:32:07,921 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure f0828c080a2e915ab2af5a6e6615ed13, server=c48e4b8eb196,36493,1733750968163}] 2024-12-09T13:32:08,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-09T13:32:08,072 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(122): Close 585b6e74652cbb1b9e8003901efa080f 2024-12-09T13:32:08,073 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:32:08,073 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1722): Closing 585b6e74652cbb1b9e8003901efa080f, disabling compactions & flushes 2024-12-09T13:32:08,073 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f. 2024-12-09T13:32:08,073 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f. 2024-12-09T13:32:08,073 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f. after waiting 0 ms 2024-12-09T13:32:08,073 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f. 2024-12-09T13:32:08,073 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(122): Close f0828c080a2e915ab2af5a6e6615ed13 2024-12-09T13:32:08,073 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:32:08,073 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1722): Closing f0828c080a2e915ab2af5a6e6615ed13, disabling compactions & flushes 2024-12-09T13:32:08,074 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13. 
2024-12-09T13:32:08,074 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13. 2024-12-09T13:32:08,074 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13. after waiting 0 ms 2024-12-09T13:32:08,074 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13. 2024-12-09T13:32:08,080 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/585b6e74652cbb1b9e8003901efa080f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T13:32:08,080 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/f0828c080a2e915ab2af5a6e6615ed13/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T13:32:08,081 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:32:08,081 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:32:08,081 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f. 2024-12-09T13:32:08,081 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13. 
2024-12-09T13:32:08,081 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1676): Region close journal for 585b6e74652cbb1b9e8003901efa080f: Waiting for close lock at 1733751128073Running coprocessor pre-close hooks at 1733751128073Disabling compacts and flushes for region at 1733751128073Disabling writes for close at 1733751128073Writing region close event to WAL at 1733751128076 (+3 ms)Running coprocessor post-close hooks at 1733751128080 (+4 ms)Closed at 1733751128081 (+1 ms) 2024-12-09T13:32:08,081 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1676): Region close journal for f0828c080a2e915ab2af5a6e6615ed13: Waiting for close lock at 1733751128073Running coprocessor pre-close hooks at 1733751128073Disabling compacts and flushes for region at 1733751128073Disabling writes for close at 1733751128074 (+1 ms)Writing region close event to WAL at 1733751128077 (+3 ms)Running coprocessor post-close hooks at 1733751128081 (+4 ms)Closed at 1733751128081 2024-12-09T13:32:08,083 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(157): Closed f0828c080a2e915ab2af5a6e6615ed13 2024-12-09T13:32:08,083 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=f0828c080a2e915ab2af5a6e6615ed13, regionState=CLOSED 2024-12-09T13:32:08,084 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(157): Closed 585b6e74652cbb1b9e8003901efa080f 2024-12-09T13:32:08,084 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=585b6e74652cbb1b9e8003901efa080f, regionState=CLOSED 2024-12-09T13:32:08,087 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure f0828c080a2e915ab2af5a6e6615ed13, server=c48e4b8eb196,36493,1733750968163 because future has completed 2024-12-09T13:32:08,088 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure 585b6e74652cbb1b9e8003901efa080f, server=c48e4b8eb196,44683,1733750968041 because future has completed 2024-12-09T13:32:08,090 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=94, resume processing ppid=91 2024-12-09T13:32:08,091 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, ppid=91, state=SUCCESS, hasLock=false; CloseRegionProcedure f0828c080a2e915ab2af5a6e6615ed13, server=c48e4b8eb196,36493,1733750968163 in 167 msec 2024-12-09T13:32:08,095 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=93, resume processing ppid=92 2024-12-09T13:32:08,095 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, ppid=92, state=SUCCESS, hasLock=false; CloseRegionProcedure 585b6e74652cbb1b9e8003901efa080f, server=c48e4b8eb196,44683,1733750968041 in 169 msec 2024-12-09T13:32:08,099 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=f0828c080a2e915ab2af5a6e6615ed13, UNASSIGN in 175 msec 2024-12-09T13:32:08,101 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): 
Finished subprocedure pid=92, resume processing ppid=90 2024-12-09T13:32:08,101 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=585b6e74652cbb1b9e8003901efa080f, UNASSIGN in 179 msec 2024-12-09T13:32:08,107 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=90, resume processing ppid=89 2024-12-09T13:32:08,107 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=89, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 188 msec 2024-12-09T13:32:08,109 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751128109"}]},"ts":"1733751128109"} 2024-12-09T13:32:08,111 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-09T13:32:08,111 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-12-09T13:32:08,114 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 205 msec 2024-12-09T13:32:08,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-09T13:32:08,224 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-09T13:32:08,224 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-12-09T13:32:08,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-09T13:32:08,226 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-09T13:32:08,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-12-09T13:32:08,227 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-09T13:32:08,229 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-12-09T13:32:08,230 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/f0828c080a2e915ab2af5a6e6615ed13 2024-12-09T13:32:08,230 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/585b6e74652cbb1b9e8003901efa080f 2024-12-09T13:32:08,232 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/585b6e74652cbb1b9e8003901efa080f/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/585b6e74652cbb1b9e8003901efa080f/recovered.edits] 2024-12-09T13:32:08,232 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/f0828c080a2e915ab2af5a6e6615ed13/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/f0828c080a2e915ab2af5a6e6615ed13/recovered.edits] 2024-12-09T13:32:08,235 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/f0828c080a2e915ab2af5a6e6615ed13/cf/4cf5ce2e85b649568d5f26a6fc08e313 to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportWithResetTtl/f0828c080a2e915ab2af5a6e6615ed13/cf/4cf5ce2e85b649568d5f26a6fc08e313 2024-12-09T13:32:08,235 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/585b6e74652cbb1b9e8003901efa080f/cf/d0e74845a4a142f6b053735307f01583 to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportWithResetTtl/585b6e74652cbb1b9e8003901efa080f/cf/d0e74845a4a142f6b053735307f01583 2024-12-09T13:32:08,238 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/f0828c080a2e915ab2af5a6e6615ed13/recovered.edits/9.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportWithResetTtl/f0828c080a2e915ab2af5a6e6615ed13/recovered.edits/9.seqid 2024-12-09T13:32:08,238 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/585b6e74652cbb1b9e8003901efa080f/recovered.edits/9.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportWithResetTtl/585b6e74652cbb1b9e8003901efa080f/recovered.edits/9.seqid 2024-12-09T13:32:08,239 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/f0828c080a2e915ab2af5a6e6615ed13 2024-12-09T13:32:08,239 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithResetTtl/585b6e74652cbb1b9e8003901efa080f 
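
The HFileArchiver entries above show the on-disk effect of the delete: every store file and recovered.edits file is moved from the table's data directory to the same relative location under archive/. The mapping visible in those paths can be sketched as follows (the helper below is hypothetical and illustrative only, not the HFileArchiver API):

public class ArchiveLayoutSketch {
  // Hypothetical helper: given the cluster root directory and a file under <root>/data/...,
  // return the corresponding location under <root>/archive/data/..., matching the moves logged above.
  static String toArchiveLocation(String rootDir, String storeFile) {
    String relative = storeFile.substring(rootDir.length() + 1);   // e.g. data/default/<table>/<region>/cf/<hfile>
    return rootDir + "/archive/" + relative;
  }

  public static void main(String[] args) {
    String root = "hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7";
    String hfile = root + "/data/default/testtb-testExportWithResetTtl/"
        + "585b6e74652cbb1b9e8003901efa080f/cf/d0e74845a4a142f6b053735307f01583";
    System.out.println(toArchiveLocation(root, hfile));   // prints the archive/ path seen in the log
  }
}
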
2024-12-09T13:32:08,239 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-12-09T13:32:08,241 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-09T13:32:08,244 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-12-09T13:32:08,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-09T13:32:08,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-09T13:32:08,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-09T13:32:08,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-09T13:32:08,259 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-09T13:32:08,259 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-09T13:32:08,259 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-09T13:32:08,259 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-09T13:32:08,260 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-12-09T13:32:08,261 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-09T13:32:08,261 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states. 
2024-12-09T13:32:08,262 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751128261"}]},"ts":"9223372036854775807"} 2024-12-09T13:32:08,262 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751128261"}]},"ts":"9223372036854775807"} 2024-12-09T13:32:08,264 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-09T13:32:08,264 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => f0828c080a2e915ab2af5a6e6615ed13, NAME => 'testtb-testExportWithResetTtl,,1733751105363.f0828c080a2e915ab2af5a6e6615ed13.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 585b6e74652cbb1b9e8003901efa080f, NAME => 'testtb-testExportWithResetTtl,1,1733751105363.585b6e74652cbb1b9e8003901efa080f.', STARTKEY => '1', ENDKEY => ''}] 2024-12-09T13:32:08,264 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted. 2024-12-09T13:32:08,264 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733751128264"}]},"ts":"9223372036854775807"} 2024-12-09T13:32:08,266 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-12-09T13:32:08,267 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-09T13:32:08,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-09T13:32:08,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-09T13:32:08,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-09T13:32:08,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:32:08,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:32:08,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 
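
Each region server's ZKPermissionWatcher reacts to the NodeDataChanged, NodeDeleted and NodeChildrenChanged events logged above for /hbase/acl/testtb-testExportWithResetTtl. Purely as an illustration of how such notifications are delivered, here is a minimal sketch using the plain ZooKeeper client API (not HBase's ZKWatcher class); the quorum string is the one reported in this log:

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class AclWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch deleted = new CountDownLatch(1);
    Watcher watcher = (WatchedEvent event) -> {
      // The log above shows exactly these event types arriving for /hbase/acl/<table>.
      System.out.println("event " + event.getType() + " on " + event.getPath());
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        deleted.countDown();
      }
    };
    ZooKeeper zk = new ZooKeeper("127.0.0.1:57683", 30_000, watcher);
    // Register a one-shot watch on the table's ACL znode.
    zk.exists("/hbase/acl/testtb-testExportWithResetTtl", watcher);
    deleted.await();   // released once the DeleteTableProcedure removes the znode
    zk.close();
  }
}
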
2024-12-09T13:32:08,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:32:08,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:32:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-12-09T13:32:08,269 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 43 msec 2024-12-09T13:32:08,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-12-09T13:32:08,373 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-12-09T13:32:08,373 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-09T13:32:08,382 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-12-09T13:32:08,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-12-09T13:32:08,386 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-12-09T13:32:08,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-12-09T13:32:08,390 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithResetTtl" type: DISABLED 2024-12-09T13:32:08,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-12-09T13:32:08,413 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=799 (was 795) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:60054 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (294847031) connection to localhost/127.0.0.1:32977 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (294847031) connection to localhost/127.0.0.1:41669 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:53646 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 131494) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1597684326_1 at /127.0.0.1:60014 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2964 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32977 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:41072 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
RPCClient-NioEventLoopGroup-6-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=810 (was 807) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=702 (was 527) - SystemLoadAverage LEAK? -, ProcessCount=19 (was 18) - ProcessCount LEAK? 
-, AvailableMemoryMB=2001 (was 2224) 2024-12-09T13:32:08,413 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=799 is superior to 500 2024-12-09T13:32:08,429 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=799, OpenFileDescriptor=810, MaxFileDescriptor=1048576, SystemLoadAverage=702, ProcessCount=19, AvailableMemoryMB=2001 2024-12-09T13:32:08,429 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=799 is superior to 500 2024-12-09T13:32:08,430 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T13:32:08,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-12-09T13:32:08,432 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T13:32:08,432 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:32:08,432 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 96 2024-12-09T13:32:08,433 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T13:32:08,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-09T13:32:08,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742018_1194 (size=407) 2024-12-09T13:32:08,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742018_1194 (size=407) 2024-12-09T13:32:08,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742018_1194 (size=407) 2024-12-09T13:32:08,441 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 628b497be0d454eeaf472abb1f4b3ce4, NAME => 'testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:32:08,441 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e32b00139b29eb65dd58ba01d37b8470, NAME => 'testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:32:08,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742019_1195 (size=68) 2024-12-09T13:32:08,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742019_1195 (size=68) 2024-12-09T13:32:08,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742019_1195 (size=68) 2024-12-09T13:32:08,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742020_1196 (size=68) 2024-12-09T13:32:08,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742020_1196 (size=68) 2024-12-09T13:32:08,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742020_1196 (size=68) 2024-12-09T13:32:08,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-09T13:32:08,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-09T13:32:08,851 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:32:08,851 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:32:08,851 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 628b497be0d454eeaf472abb1f4b3ce4, disabling compactions & flushes 2024-12-09T13:32:08,851 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] 
regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4. 2024-12-09T13:32:08,851 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing e32b00139b29eb65dd58ba01d37b8470, disabling compactions & flushes 2024-12-09T13:32:08,851 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4. 2024-12-09T13:32:08,851 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470. 2024-12-09T13:32:08,851 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4. after waiting 0 ms 2024-12-09T13:32:08,851 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470. 2024-12-09T13:32:08,851 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4. 2024-12-09T13:32:08,851 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470. after waiting 0 ms 2024-12-09T13:32:08,851 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4. 2024-12-09T13:32:08,851 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470. 2024-12-09T13:32:08,851 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470. 
2024-12-09T13:32:08,851 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 628b497be0d454eeaf472abb1f4b3ce4: Waiting for close lock at 1733751128851Disabling compacts and flushes for region at 1733751128851Disabling writes for close at 1733751128851Writing region close event to WAL at 1733751128851Closed at 1733751128851 2024-12-09T13:32:08,852 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for e32b00139b29eb65dd58ba01d37b8470: Waiting for close lock at 1733751128851Disabling compacts and flushes for region at 1733751128851Disabling writes for close at 1733751128851Writing region close event to WAL at 1733751128851Closed at 1733751128851 2024-12-09T13:32:08,853 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T13:32:08,854 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733751128853"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733751128853"}]},"ts":"1733751128853"} 2024-12-09T13:32:08,854 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733751128853"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733751128853"}]},"ts":"1733751128853"} 2024-12-09T13:32:08,858 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
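For reference, a minimal client-side sketch of how a create-table request like the one logged above could be issued with the standard HBase Java Admin API. Only the table name and the 'cf' family attributes are taken from the logged descriptor; the connection setup and the single split key '1' (which would yield the two regions ['', '1') and ['1', '') created above) are illustrative assumptions, not part of the test output.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExportTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Mirror the logged descriptor: REGION_REPLICATION=1, family 'cf' with
      // VERSIONS=1, BLOOMFILTER=ROW, BLOCKSIZE=64KB; other attributes keep defaults.
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemState"))
          .setRegionReplication(1)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .setBlocksize(64 * 1024)
              .build())
          .build();
      // One split key so the table is pre-split into two regions, as in the log.
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}
```

On the master side this request is driven by a CreateTableProcedure through the states seen above (CREATE_TABLE_PRE_OPERATION, CREATE_TABLE_WRITE_FS_LAYOUT, CREATE_TABLE_ADD_TO_META, CREATE_TABLE_ASSIGN_REGIONS, ...).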
2024-12-09T13:32:08,859 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T13:32:08,859 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751128859"}]},"ts":"1733751128859"} 2024-12-09T13:32:08,861 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-12-09T13:32:08,862 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {c48e4b8eb196=0} racks are {/default-rack=0} 2024-12-09T13:32:08,863 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T13:32:08,863 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T13:32:08,863 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T13:32:08,863 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T13:32:08,863 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T13:32:08,863 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T13:32:08,863 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T13:32:08,863 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T13:32:08,863 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T13:32:08,863 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T13:32:08,863 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e32b00139b29eb65dd58ba01d37b8470, ASSIGN}, {pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=628b497be0d454eeaf472abb1f4b3ce4, ASSIGN}] 2024-12-09T13:32:08,865 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=628b497be0d454eeaf472abb1f4b3ce4, ASSIGN 2024-12-09T13:32:08,865 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e32b00139b29eb65dd58ba01d37b8470, ASSIGN 2024-12-09T13:32:08,865 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=628b497be0d454eeaf472abb1f4b3ce4, ASSIGN; state=OFFLINE, location=c48e4b8eb196,39211,1733750968235; forceNewPlan=false, retain=false 2024-12-09T13:32:08,865 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=97, ppid=96, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e32b00139b29eb65dd58ba01d37b8470, ASSIGN; state=OFFLINE, location=c48e4b8eb196,44683,1733750968041; forceNewPlan=false, retain=false 2024-12-09T13:32:09,016 INFO [c48e4b8eb196:45049 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-09T13:32:09,016 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=e32b00139b29eb65dd58ba01d37b8470, regionState=OPENING, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:32:09,016 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=628b497be0d454eeaf472abb1f4b3ce4, regionState=OPENING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:32:09,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=628b497be0d454eeaf472abb1f4b3ce4, ASSIGN because future has completed 2024-12-09T13:32:09,018 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure 628b497be0d454eeaf472abb1f4b3ce4, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:32:09,019 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e32b00139b29eb65dd58ba01d37b8470, ASSIGN because future has completed 2024-12-09T13:32:09,019 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure e32b00139b29eb65dd58ba01d37b8470, server=c48e4b8eb196,44683,1733750968041}] 2024-12-09T13:32:09,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-09T13:32:09,173 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4. 2024-12-09T13:32:09,173 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470. 
2024-12-09T13:32:09,174 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7752): Opening region: {ENCODED => e32b00139b29eb65dd58ba01d37b8470, NAME => 'testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T13:32:09,174 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7752): Opening region: {ENCODED => 628b497be0d454eeaf472abb1f4b3ce4, NAME => 'testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T13:32:09,174 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470. service=AccessControlService 2024-12-09T13:32:09,174 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4. service=AccessControlService 2024-12-09T13:32:09,174 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T13:32:09,174 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T13:32:09,174 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState e32b00139b29eb65dd58ba01d37b8470 2024-12-09T13:32:09,174 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 628b497be0d454eeaf472abb1f4b3ce4 2024-12-09T13:32:09,174 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:32:09,175 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:32:09,175 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7794): checking encryption for e32b00139b29eb65dd58ba01d37b8470 2024-12-09T13:32:09,175 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7797): checking classloading for e32b00139b29eb65dd58ba01d37b8470 2024-12-09T13:32:09,175 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] 
regionserver.HRegion(7794): checking encryption for 628b497be0d454eeaf472abb1f4b3ce4 2024-12-09T13:32:09,175 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7797): checking classloading for 628b497be0d454eeaf472abb1f4b3ce4 2024-12-09T13:32:09,176 INFO [StoreOpener-e32b00139b29eb65dd58ba01d37b8470-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e32b00139b29eb65dd58ba01d37b8470 2024-12-09T13:32:09,176 INFO [StoreOpener-628b497be0d454eeaf472abb1f4b3ce4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 628b497be0d454eeaf472abb1f4b3ce4 2024-12-09T13:32:09,178 INFO [StoreOpener-628b497be0d454eeaf472abb1f4b3ce4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 628b497be0d454eeaf472abb1f4b3ce4 columnFamilyName cf 2024-12-09T13:32:09,178 INFO [StoreOpener-e32b00139b29eb65dd58ba01d37b8470-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e32b00139b29eb65dd58ba01d37b8470 columnFamilyName cf 2024-12-09T13:32:09,178 DEBUG [StoreOpener-628b497be0d454eeaf472abb1f4b3ce4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:32:09,178 DEBUG [StoreOpener-e32b00139b29eb65dd58ba01d37b8470-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:32:09,178 INFO [StoreOpener-e32b00139b29eb65dd58ba01d37b8470-1 {}] regionserver.HStore(327): Store=e32b00139b29eb65dd58ba01d37b8470/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:32:09,178 INFO [StoreOpener-628b497be0d454eeaf472abb1f4b3ce4-1 {}] regionserver.HStore(327): Store=628b497be0d454eeaf472abb1f4b3ce4/cf, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:32:09,178 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1038): replaying wal for 628b497be0d454eeaf472abb1f4b3ce4 2024-12-09T13:32:09,178 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1038): replaying wal for e32b00139b29eb65dd58ba01d37b8470 2024-12-09T13:32:09,179 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/e32b00139b29eb65dd58ba01d37b8470 2024-12-09T13:32:09,179 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/628b497be0d454eeaf472abb1f4b3ce4 2024-12-09T13:32:09,179 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/e32b00139b29eb65dd58ba01d37b8470 2024-12-09T13:32:09,179 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/628b497be0d454eeaf472abb1f4b3ce4 2024-12-09T13:32:09,180 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1048): stopping wal replay for e32b00139b29eb65dd58ba01d37b8470 2024-12-09T13:32:09,180 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1048): stopping wal replay for 628b497be0d454eeaf472abb1f4b3ce4 2024-12-09T13:32:09,180 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1060): Cleaning up temporary data for e32b00139b29eb65dd58ba01d37b8470 2024-12-09T13:32:09,180 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1060): Cleaning up temporary data for 628b497be0d454eeaf472abb1f4b3ce4 2024-12-09T13:32:09,182 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1093): writing seq id for 628b497be0d454eeaf472abb1f4b3ce4 2024-12-09T13:32:09,182 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1093): writing seq id for e32b00139b29eb65dd58ba01d37b8470 2024-12-09T13:32:09,185 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/e32b00139b29eb65dd58ba01d37b8470/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:32:09,185 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 
{event_type=M_RS_OPEN_REGION, pid=99}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/628b497be0d454eeaf472abb1f4b3ce4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:32:09,185 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1114): Opened 628b497be0d454eeaf472abb1f4b3ce4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67309527, jitterRate=0.0029901117086410522}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:32:09,185 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1114): Opened e32b00139b29eb65dd58ba01d37b8470; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70234477, jitterRate=0.046575263142585754}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:32:09,185 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 628b497be0d454eeaf472abb1f4b3ce4 2024-12-09T13:32:09,185 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e32b00139b29eb65dd58ba01d37b8470 2024-12-09T13:32:09,186 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1006): Region open journal for 628b497be0d454eeaf472abb1f4b3ce4: Running coprocessor pre-open hook at 1733751129175Writing region info on filesystem at 1733751129175Initializing all the Stores at 1733751129176 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751129176Cleaning up temporary data from old regions at 1733751129180 (+4 ms)Running coprocessor post-open hooks at 1733751129185 (+5 ms)Region opened successfully at 1733751129186 (+1 ms) 2024-12-09T13:32:09,186 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1006): Region open journal for e32b00139b29eb65dd58ba01d37b8470: Running coprocessor pre-open hook at 1733751129175Writing region info on filesystem at 1733751129175Initializing all the Stores at 1733751129176 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751129176Cleaning up temporary data from old regions at 1733751129180 (+4 ms)Running coprocessor post-open hooks at 1733751129185 (+5 ms)Region opened successfully at 1733751129186 (+1 ms) 2024-12-09T13:32:09,187 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2236): Post open deploy tasks 
for testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4., pid=99, masterSystemTime=1733751129170 2024-12-09T13:32:09,187 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470., pid=100, masterSystemTime=1733751129170 2024-12-09T13:32:09,190 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470. 2024-12-09T13:32:09,190 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4. 2024-12-09T13:32:09,190 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470. 2024-12-09T13:32:09,190 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4. 2024-12-09T13:32:09,193 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=e32b00139b29eb65dd58ba01d37b8470, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:32:09,193 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=628b497be0d454eeaf472abb1f4b3ce4, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:32:09,195 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure 628b497be0d454eeaf472abb1f4b3ce4, server=c48e4b8eb196,39211,1733750968235 because future has completed 2024-12-09T13:32:09,196 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure e32b00139b29eb65dd58ba01d37b8470, server=c48e4b8eb196,44683,1733750968041 because future has completed 2024-12-09T13:32:09,199 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=99, resume processing ppid=98 2024-12-09T13:32:09,199 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, ppid=98, state=SUCCESS, hasLock=false; OpenRegionProcedure 628b497be0d454eeaf472abb1f4b3ce4, server=c48e4b8eb196,39211,1733750968235 in 178 msec 2024-12-09T13:32:09,200 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=100, resume processing ppid=97 2024-12-09T13:32:09,200 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=97, state=SUCCESS, hasLock=false; OpenRegionProcedure e32b00139b29eb65dd58ba01d37b8470, server=c48e4b8eb196,44683,1733750968041 in 179 msec 2024-12-09T13:32:09,200 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=628b497be0d454eeaf472abb1f4b3ce4, ASSIGN in 336 msec 
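Once the two ASSIGN/OpenRegionProcedure subprocedures finish as logged above, a client can confirm where the regions landed. A rough sketch using the Admin and RegionLocator APIs, assuming a default client Configuration:

```java
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class CheckAssignments {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         RegionLocator locator = conn.getRegionLocator(table)) {
      // True once the ASSIGN procedures above have reached state=SUCCESS.
      System.out.println("available: " + admin.isTableAvailable(table));
      // Each entry reports a region name and the region server hosting it.
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation loc : locations) {
        System.out.println(loc.getRegion().getRegionNameAsString()
            + " -> " + loc.getServerName());
      }
    }
  }
}
```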
2024-12-09T13:32:09,201 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=97, resume processing ppid=96 2024-12-09T13:32:09,201 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e32b00139b29eb65dd58ba01d37b8470, ASSIGN in 337 msec 2024-12-09T13:32:09,202 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T13:32:09,202 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751129202"}]},"ts":"1733751129202"} 2024-12-09T13:32:09,203 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-12-09T13:32:09,204 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T13:32:09,204 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-12-09T13:32:09,207 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-09T13:32:09,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:32:09,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:32:09,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:32:09,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:32:09,259 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-09T13:32:09,259 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-09T13:32:09,259 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 
testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-09T13:32:09,259 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-09T13:32:09,260 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 829 msec 2024-12-09T13:32:09,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-09T13:32:09,573 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-09T13:32:09,573 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemState get assigned. Timeout = 60000ms 2024-12-09T13:32:09,573 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:32:09,577 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemState assigned to meta. Checking AM states. 2024-12-09T13:32:09,577 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:32:09,577 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemState assigned. 2024-12-09T13:32:09,577 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T13:32:09,580 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-09T13:32:09,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733751129580 (current time:1733751129580). 
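The snapshot request logged above ({ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }) corresponds to a client-side Admin snapshot call. A hedged sketch against the HBase client API; the simpler admin.snapshot(name, tableName) overload should produce an equivalent flush-type snapshot.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Issues the same kind of request as the one logged by MasterRpcServices:
      // name, table, and FLUSH type; TTL is left at its default.
      admin.snapshot(new SnapshotDescription(
          "emptySnaptb0-testExportFileSystemState",
          TableName.valueOf("testtb-testExportFileSystemState"),
          SnapshotType.FLUSH));
    }
  }
}
```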
2024-12-09T13:32:09,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T13:32:09,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-09T13:32:09,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T13:32:09,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a09ef65, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:32:09,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:32:09,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:32:09,582 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:32:09,582 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:32:09,582 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:32:09,582 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d189e0b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:32:09,582 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:32:09,583 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:32:09,583 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:32:09,583 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40982, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:32:09,584 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ea443d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:32:09,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:32:09,585 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:32:09,585 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:32:09,586 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55364, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:32:09,587 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049. 2024-12-09T13:32:09,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:32:09,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:32:09,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:32:09,588 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T13:32:09,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3df43c24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:32:09,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:32:09,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:32:09,589 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:32:09,589 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:32:09,589 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:32:09,590 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@553b7bf7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:32:09,590 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:32:09,590 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:32:09,590 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:32:09,591 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41002, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:32:09,591 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a22f6d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:32:09,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:32:09,592 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:32:09,593 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:32:09,593 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55366, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-09T13:32:09,595 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:32:09,597 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049. 2024-12-09T13:32:09,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:32:09,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:32:09,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:32:09,597 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
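The ACL read above (the jenkins: RWXCA entry being copied into the snapshot description by writeAclToSnapshotDescription) can also be observed from a client with AccessControlClient, assuming the AccessController coprocessor is installed as shown earlier in the log. A sketch, with the table name used as a literal regex:

```java
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ReadTableAcl {
  // AccessControlClient declares throws Throwable on its calls.
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Should return entries like "jenkins: RWXCA" for the table above.
      List<UserPermission> perms =
          AccessControlClient.getUserPermissions(conn, "testtb-testExportFileSystemState");
      for (UserPermission p : perms) {
        System.out.println(p);
      }
    }
  }
}
```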
2024-12-09T13:32:09,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-09T13:32:09,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T13:32:09,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-09T13:32:09,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-09T13:32:09,600 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T13:32:09,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-09T13:32:09,601 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T13:32:09,603 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T13:32:09,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742021_1197 (size=170) 2024-12-09T13:32:09,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742021_1197 (size=170) 2024-12-09T13:32:09,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742021_1197 (size=170) 2024-12-09T13:32:09,609 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T13:32:09,609 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e32b00139b29eb65dd58ba01d37b8470}, {pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 628b497be0d454eeaf472abb1f4b3ce4}] 2024-12-09T13:32:09,610 INFO [PEWorker-4 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e32b00139b29eb65dd58ba01d37b8470 2024-12-09T13:32:09,610 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 628b497be0d454eeaf472abb1f4b3ce4 2024-12-09T13:32:09,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-09T13:32:09,761 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39211 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-12-09T13:32:09,762 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4. 2024-12-09T13:32:09,762 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44683 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=102 2024-12-09T13:32:09,762 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470. 2024-12-09T13:32:09,762 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for 628b497be0d454eeaf472abb1f4b3ce4: 2024-12-09T13:32:09,762 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4. for emptySnaptb0-testExportFileSystemState completed. 2024-12-09T13:32:09,762 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.HRegion(2603): Flush status journal for e32b00139b29eb65dd58ba01d37b8470: 2024-12-09T13:32:09,762 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470. for emptySnaptb0-testExportFileSystemState completed. 2024-12-09T13:32:09,762 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-09T13:32:09,762 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-09T13:32:09,762 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:32:09,762 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:32:09,762 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T13:32:09,762 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T13:32:09,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742023_1199 (size=71) 2024-12-09T13:32:09,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742023_1199 (size=71) 2024-12-09T13:32:09,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742022_1198 (size=71) 2024-12-09T13:32:09,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742023_1199 (size=71) 2024-12-09T13:32:09,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742022_1198 (size=71) 2024-12-09T13:32:09,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742022_1198 (size=71) 2024-12-09T13:32:09,769 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470. 2024-12-09T13:32:09,769 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=102 2024-12-09T13:32:09,769 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4. 
2024-12-09T13:32:09,769 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-12-09T13:32:09,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=102 2024-12-09T13:32:09,769 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region e32b00139b29eb65dd58ba01d37b8470 2024-12-09T13:32:09,769 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e32b00139b29eb65dd58ba01d37b8470 2024-12-09T13:32:09,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-12-09T13:32:09,769 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 628b497be0d454eeaf472abb1f4b3ce4 2024-12-09T13:32:09,770 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 628b497be0d454eeaf472abb1f4b3ce4 2024-12-09T13:32:09,771 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e32b00139b29eb65dd58ba01d37b8470 in 161 msec 2024-12-09T13:32:09,772 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=103, resume processing ppid=101 2024-12-09T13:32:09,772 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 628b497be0d454eeaf472abb1f4b3ce4 in 161 msec 2024-12-09T13:32:09,772 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T13:32:09,773 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T13:32:09,773 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T13:32:09,773 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-12-09T13:32:09,774 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-12-09T13:32:09,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742024_1200 (size=552) 2024-12-09T13:32:09,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742024_1200 (size=552) 2024-12-09T13:32:09,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742024_1200 (size=552) 2024-12-09T13:32:09,784 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T13:32:09,788 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T13:32:09,789 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-12-09T13:32:09,790 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T13:32:09,790 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-09T13:32:09,791 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 192 msec 2024-12-09T13:32:09,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-09T13:32:09,913 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-09T13:32:09,916 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='0d36a675f8145dca9b9e1f1f378d2f3b5', locateType=CURRENT is [region=testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470., hostname=c48e4b8eb196,44683,1733750968041, seqNum=2] 2024-12-09T13:32:09,918 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='1aca55dfd789009fd1b25720bc8490517', locateType=CURRENT is 
[region=testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:32:09,919 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='3e50a72ebd6917f13fb99528aa4da947b', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:32:09,919 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='249c016ae557bfcfeca2d1ef2252e071f', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:32:09,920 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='4b8d42e8081f4a8263c97fa084043a050', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:32:09,921 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='515160d3fd7fa5110126bbde8837b6c3c', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:32:09,923 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T13:32:09,925 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39211 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T13:32:09,927 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T13:32:09,929 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-12-09T13:32:09,929 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470. 
2024-12-09T13:32:09,929 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:32:09,931 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T13:32:09,934 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T13:32:09,939 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T13:32:09,941 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-09T13:32:09,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733751129941 (current time:1733751129941). 2024-12-09T13:32:09,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T13:32:09,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-09T13:32:09,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T13:32:09,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47b4a267, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:32:09,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:32:09,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:32:09,943 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:32:09,943 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:32:09,943 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:32:09,943 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2deef39d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-12-09T13:32:09,943 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:32:09,944 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:32:09,944 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:32:09,944 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41012, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:32:09,945 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7789fb4e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:32:09,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:32:09,946 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:32:09,946 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:32:09,947 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55368, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:32:09,948 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049. 
2024-12-09T13:32:09,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:32:09,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:32:09,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:32:09,948 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:32:09,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f5be3a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:32:09,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:32:09,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:32:09,949 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:32:09,949 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:32:09,949 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:32:09,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2cd56e2a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:32:09,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:32:09,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:32:09,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:32:09,950 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41030, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:32:09,951 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58e944de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:32:09,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:32:09,952 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:32:09,952 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:32:09,953 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55374, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:32:09,954 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:32:09,955 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049. 
2024-12-09T13:32:09,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:32:09,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:32:09,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:32:09,956 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:32:09,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-09T13:32:09,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-09T13:32:09,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-09T13:32:09,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-09T13:32:09,958 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T13:32:09,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-09T13:32:09,959 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T13:32:09,961 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T13:32:09,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742025_1201 (size=165) 2024-12-09T13:32:09,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742025_1201 (size=165) 2024-12-09T13:32:09,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742025_1201 (size=165) 2024-12-09T13:32:09,968 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T13:32:09,968 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e32b00139b29eb65dd58ba01d37b8470}, {pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 628b497be0d454eeaf472abb1f4b3ce4}] 2024-12-09T13:32:09,969 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 628b497be0d454eeaf472abb1f4b3ce4 2024-12-09T13:32:09,969 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e32b00139b29eb65dd58ba01d37b8470 2024-12-09T13:32:10,063 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-09T13:32:10,120 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39211 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-12-09T13:32:10,120 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44683 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-12-09T13:32:10,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470. 2024-12-09T13:32:10,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4. 2024-12-09T13:32:10,121 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2902): Flushing e32b00139b29eb65dd58ba01d37b8470 1/1 column families, dataSize=467 B heapSize=1.23 KB 2024-12-09T13:32:10,121 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2902): Flushing 628b497be0d454eeaf472abb1f4b3ce4 1/1 column families, dataSize=2.80 KB heapSize=6.30 KB 2024-12-09T13:32:10,134 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/e32b00139b29eb65dd58ba01d37b8470/.tmp/cf/082c16b01bbd421ea5c71cfedd72f272 is 71, key is 022d5fea183ce993c270f2c0c79527ba/cf:q/1733751129922/Put/seqid=0 2024-12-09T13:32:10,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742026_1202 (size=5568) 2024-12-09T13:32:10,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742026_1202 (size=5568) 2024-12-09T13:32:10,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742026_1202 (size=5568) 2024-12-09T13:32:10,140 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=467 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/e32b00139b29eb65dd58ba01d37b8470/.tmp/cf/082c16b01bbd421ea5c71cfedd72f272 2024-12-09T13:32:10,141 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/628b497be0d454eeaf472abb1f4b3ce4/.tmp/cf/9ff6c1fe965946349ce74529f3cb8ea5 is 71, key is 
11e4815499d56661c833f7b831211d01/cf:q/1733751129925/Put/seqid=0 2024-12-09T13:32:10,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/e32b00139b29eb65dd58ba01d37b8470/.tmp/cf/082c16b01bbd421ea5c71cfedd72f272 as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/e32b00139b29eb65dd58ba01d37b8470/cf/082c16b01bbd421ea5c71cfedd72f272 2024-12-09T13:32:10,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742027_1203 (size=8052) 2024-12-09T13:32:10,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742027_1203 (size=8052) 2024-12-09T13:32:10,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742027_1203 (size=8052) 2024-12-09T13:32:10,148 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.80 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/628b497be0d454eeaf472abb1f4b3ce4/.tmp/cf/9ff6c1fe965946349ce74529f3cb8ea5 2024-12-09T13:32:10,151 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/e32b00139b29eb65dd58ba01d37b8470/cf/082c16b01bbd421ea5c71cfedd72f272, entries=7, sequenceid=6, filesize=5.4 K 2024-12-09T13:32:10,152 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(3140): Finished flush of dataSize ~467 B/467, heapSize ~1.22 KB/1248, currentSize=0 B/0 for e32b00139b29eb65dd58ba01d37b8470 in 32ms, sequenceid=6, compaction requested=false 2024-12-09T13:32:10,152 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-09T13:32:10,153 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2603): Flush status journal for e32b00139b29eb65dd58ba01d37b8470: 2024-12-09T13:32:10,153 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470. for snaptb0-testExportFileSystemState completed. 2024-12-09T13:32:10,153 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-09T13:32:10,153 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:32:10,153 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/e32b00139b29eb65dd58ba01d37b8470/cf/082c16b01bbd421ea5c71cfedd72f272] hfiles 2024-12-09T13:32:10,153 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/e32b00139b29eb65dd58ba01d37b8470/cf/082c16b01bbd421ea5c71cfedd72f272 for snapshot=snaptb0-testExportFileSystemState 2024-12-09T13:32:10,156 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/628b497be0d454eeaf472abb1f4b3ce4/.tmp/cf/9ff6c1fe965946349ce74529f3cb8ea5 as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/628b497be0d454eeaf472abb1f4b3ce4/cf/9ff6c1fe965946349ce74529f3cb8ea5 2024-12-09T13:32:10,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742028_1204 (size=110) 2024-12-09T13:32:10,161 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/628b497be0d454eeaf472abb1f4b3ce4/cf/9ff6c1fe965946349ce74529f3cb8ea5, entries=43, sequenceid=6, filesize=7.9 K 2024-12-09T13:32:10,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742028_1204 (size=110) 2024-12-09T13:32:10,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742028_1204 (size=110) 2024-12-09T13:32:10,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470. 
2024-12-09T13:32:10,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-09T13:32:10,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=105 2024-12-09T13:32:10,163 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region e32b00139b29eb65dd58ba01d37b8470 2024-12-09T13:32:10,163 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e32b00139b29eb65dd58ba01d37b8470 2024-12-09T13:32:10,165 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e32b00139b29eb65dd58ba01d37b8470 in 196 msec 2024-12-09T13:32:10,167 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(3140): Finished flush of dataSize ~2.80 KB/2869, heapSize ~6.28 KB/6432, currentSize=0 B/0 for 628b497be0d454eeaf472abb1f4b3ce4 in 47ms, sequenceid=6, compaction requested=false 2024-12-09T13:32:10,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2603): Flush status journal for 628b497be0d454eeaf472abb1f4b3ce4: 2024-12-09T13:32:10,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4. for snaptb0-testExportFileSystemState completed. 2024-12-09T13:32:10,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-09T13:32:10,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:32:10,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/628b497be0d454eeaf472abb1f4b3ce4/cf/9ff6c1fe965946349ce74529f3cb8ea5] hfiles 2024-12-09T13:32:10,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/628b497be0d454eeaf472abb1f4b3ce4/cf/9ff6c1fe965946349ce74529f3cb8ea5 for snapshot=snaptb0-testExportFileSystemState 2024-12-09T13:32:10,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742029_1205 (size=110) 2024-12-09T13:32:10,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742029_1205 (size=110) 2024-12-09T13:32:10,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742029_1205 (size=110) 2024-12-09T13:32:10,174 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4. 
2024-12-09T13:32:10,174 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-12-09T13:32:10,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=106 2024-12-09T13:32:10,175 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 628b497be0d454eeaf472abb1f4b3ce4 2024-12-09T13:32:10,175 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 628b497be0d454eeaf472abb1f4b3ce4 2024-12-09T13:32:10,177 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=106, resume processing ppid=104 2024-12-09T13:32:10,177 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T13:32:10,177 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 628b497be0d454eeaf472abb1f4b3ce4 in 207 msec 2024-12-09T13:32:10,177 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T13:32:10,178 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T13:32:10,178 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-12-09T13:32:10,178 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-09T13:32:10,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742030_1206 (size=630) 2024-12-09T13:32:10,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742030_1206 (size=630) 2024-12-09T13:32:10,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742030_1206 (size=630) 2024-12-09T13:32:10,188 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T13:32:10,193 INFO 
[PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T13:32:10,194 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-09T13:32:10,196 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T13:32:10,196 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-09T13:32:10,197 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 239 msec 2024-12-09T13:32:10,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-09T13:32:10,273 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-09T13:32:10,273 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751130273 2024-12-09T13:32:10,273 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:45819, tgtDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751130273, rawTgtDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751130273, srcFsUri=hdfs://localhost:45819, srcDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:32:10,300 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:45819, inputRoot=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:32:10,300 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751130273, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751130273/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-09T13:32:10,301 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): 
Verify the source snapshot's expiration status and integrity. 2024-12-09T13:32:10,306 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751130273/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-09T13:32:10,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742032_1208 (size=630) 2024-12-09T13:32:10,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742032_1208 (size=630) 2024-12-09T13:32:10,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742031_1207 (size=165) 2024-12-09T13:32:10,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742031_1207 (size=165) 2024-12-09T13:32:10,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742031_1207 (size=165) 2024-12-09T13:32:10,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742032_1208 (size=630) 2024-12-09T13:32:10,717 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:10,718 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:10,718 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:11,512 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop-15886207045337330019.jar 2024-12-09T13:32:11,512 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:11,512 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:11,564 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop-4025533484414389702.jar 2024-12-09T13:32:11,564 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:11,565 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:11,565 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:11,565 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:11,565 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:11,565 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:11,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-09T13:32:11,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-09T13:32:11,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-09T13:32:11,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-09T13:32:11,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-09T13:32:11,567 
DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-09T13:32:11,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-09T13:32:11,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-09T13:32:11,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-09T13:32:11,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-09T13:32:11,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-09T13:32:11,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:32:11,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:32:11,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:32:11,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:32:11,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:32:11,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:32:11,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:32:11,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742033_1209 (size=5175431) 2024-12-09T13:32:11,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742033_1209 (size=5175431) 2024-12-09T13:32:11,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742033_1209 (size=5175431) 2024-12-09T13:32:11,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742034_1210 (size=1877034) 2024-12-09T13:32:11,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742034_1210 (size=1877034) 2024-12-09T13:32:11,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742034_1210 (size=1877034) 2024-12-09T13:32:11,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742035_1211 (size=20406) 2024-12-09T13:32:11,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742035_1211 (size=20406) 2024-12-09T13:32:11,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742035_1211 (size=20406) 2024-12-09T13:32:11,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742036_1212 (size=24096) 2024-12-09T13:32:11,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742036_1212 (size=24096) 2024-12-09T13:32:11,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742036_1212 (size=24096) 2024-12-09T13:32:11,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742037_1213 (size=503880) 2024-12-09T13:32:11,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742037_1213 (size=503880) 2024-12-09T13:32:11,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742037_1213 (size=503880) 2024-12-09T13:32:11,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742038_1214 (size=4695811) 2024-12-09T13:32:11,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742038_1214 (size=4695811) 
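The "For class ..., using jar ..." DEBUG records above are TableMapReduceUtil mapping each class the export job needs to the jar that provides it, so those jars can be shipped with the MapReduce job. A minimal sketch of how client code typically triggers that resolution, assuming an ordinary Configuration and Job; the class and job names here are illustrative, not the exact code behind these records:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "dependency-jar-demo");

    // Resolves the jar backing each required HBase/Hadoop class (the
    // "For class ..., using jar ..." lines above) and adds those jars
    // to the job's distributed cache.
    TableMapReduceUtil.addDependencyJars(job);

    // "tmpjars" is the configuration key Hadoop uses for jars shipped
    // with the job; printing it shows what was picked up.
    System.out.println(job.getConfiguration().get("tmpjars"));
  }
}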
2024-12-09T13:32:11,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742038_1214 (size=4695811) 2024-12-09T13:32:11,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742039_1215 (size=443172) 2024-12-09T13:32:11,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742039_1215 (size=443172) 2024-12-09T13:32:11,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742039_1215 (size=443172) 2024-12-09T13:32:11,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742040_1216 (size=111872) 2024-12-09T13:32:11,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742040_1216 (size=111872) 2024-12-09T13:32:11,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742040_1216 (size=111872) 2024-12-09T13:32:11,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742041_1217 (size=1597213) 2024-12-09T13:32:11,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742041_1217 (size=1597213) 2024-12-09T13:32:11,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742041_1217 (size=1597213) 2024-12-09T13:32:11,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742042_1218 (size=4188619) 2024-12-09T13:32:11,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742042_1218 (size=4188619) 2024-12-09T13:32:11,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742042_1218 (size=4188619) 2024-12-09T13:32:11,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742043_1219 (size=127628) 2024-12-09T13:32:11,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742043_1219 (size=127628) 2024-12-09T13:32:11,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742043_1219 (size=127628) 2024-12-09T13:32:11,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742044_1220 (size=29229) 2024-12-09T13:32:11,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742044_1220 (size=29229) 2024-12-09T13:32:11,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742044_1220 (size=29229) 2024-12-09T13:32:11,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742045_1221 
(size=45609) 2024-12-09T13:32:11,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742045_1221 (size=45609) 2024-12-09T13:32:11,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742045_1221 (size=45609) 2024-12-09T13:32:11,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742046_1222 (size=8360360) 2024-12-09T13:32:11,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742046_1222 (size=8360360) 2024-12-09T13:32:11,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742046_1222 (size=8360360) 2024-12-09T13:32:11,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742047_1223 (size=1323991) 2024-12-09T13:32:11,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742047_1223 (size=1323991) 2024-12-09T13:32:11,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742047_1223 (size=1323991) 2024-12-09T13:32:11,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742048_1224 (size=232957) 2024-12-09T13:32:11,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742048_1224 (size=232957) 2024-12-09T13:32:11,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742048_1224 (size=232957) 2024-12-09T13:32:11,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742049_1225 (size=217634) 2024-12-09T13:32:11,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742049_1225 (size=217634) 2024-12-09T13:32:11,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742049_1225 (size=217634) 2024-12-09T13:32:11,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742050_1226 (size=903930) 2024-12-09T13:32:11,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742050_1226 (size=903930) 2024-12-09T13:32:11,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742050_1226 (size=903930) 2024-12-09T13:32:11,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742051_1227 (size=77835) 2024-12-09T13:32:11,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742051_1227 (size=77835) 2024-12-09T13:32:11,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to 
blk_1073742051_1227 (size=77835) 2024-12-09T13:32:11,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742052_1228 (size=6425017) 2024-12-09T13:32:11,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742052_1228 (size=6425017) 2024-12-09T13:32:11,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742052_1228 (size=6425017) 2024-12-09T13:32:11,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742053_1229 (size=1832290) 2024-12-09T13:32:11,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742053_1229 (size=1832290) 2024-12-09T13:32:11,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742053_1229 (size=1832290) 2024-12-09T13:32:11,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742054_1230 (size=30949) 2024-12-09T13:32:11,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742054_1230 (size=30949) 2024-12-09T13:32:11,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742054_1230 (size=30949) 2024-12-09T13:32:11,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742055_1231 (size=136454) 2024-12-09T13:32:11,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742055_1231 (size=136454) 2024-12-09T13:32:11,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742055_1231 (size=136454) 2024-12-09T13:32:11,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742056_1232 (size=322274) 2024-12-09T13:32:11,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742056_1232 (size=322274) 2024-12-09T13:32:11,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742056_1232 (size=322274) 2024-12-09T13:32:11,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742057_1233 (size=131440) 2024-12-09T13:32:11,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742057_1233 (size=131440) 2024-12-09T13:32:11,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742057_1233 (size=131440) 2024-12-09T13:32:11,891 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
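The JobResourceUploader warning just above ("No job jar file set. User classes may not be found.") appears when the submitting code never tells the job which jar carries its user classes; in a mini-cluster test the classes are typically on the container classpath anyway, so it stays a WARN. A minimal illustrative sketch of the usual remedy when submitting against a real cluster; the class below is a stand-in, not part of this test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class JobJarExample {
  public static Job newJob(Configuration conf) throws Exception {
    Job job = Job.getInstance(conf, "jar-aware-job");
    // Either call avoids the "No job jar file set" warning: let Hadoop
    // locate the jar containing a known class, or name the jar directly.
    job.setJarByClass(JobJarExample.class);
    // job.setJar("/path/to/your-job.jar");  // explicit alternative
    return job;
  }
}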
2024-12-09T13:32:11,894 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-12-09T13:32:11,895 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=7.9 K 2024-12-09T13:32:11,895 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.4 K 2024-12-09T13:32:11,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742058_1234 (size=447) 2024-12-09T13:32:11,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742058_1234 (size=447) 2024-12-09T13:32:11,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742058_1234 (size=447) 2024-12-09T13:32:11,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742059_1235 (size=21) 2024-12-09T13:32:11,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742059_1235 (size=21) 2024-12-09T13:32:11,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742059_1235 (size=21) 2024-12-09T13:32:11,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742060_1236 (size=304110) 2024-12-09T13:32:11,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742060_1236 (size=304110) 2024-12-09T13:32:11,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742060_1236 (size=304110) 2024-12-09T13:32:12,162 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T13:32:12,162 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-09T13:32:12,164 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0003_000001 (auth:SIMPLE) from 127.0.0.1:57468 2024-12-09T13:32:12,174 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_2/usercache/jenkins/appcache/application_1733750974572_0003/container_1733750974572_0003_01_000001/launch_container.sh] 2024-12-09T13:32:12,174 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_2/usercache/jenkins/appcache/application_1733750974572_0003/container_1733750974572_0003_01_000001/container_tokens] 2024-12-09T13:32:12,174 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_2/usercache/jenkins/appcache/application_1733750974572_0003/container_1733750974572_0003_01_000001/sysfs] 2024-12-09T13:32:13,049 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T13:32:13,122 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0004_000001 (auth:SIMPLE) from 127.0.0.1:48452 2024-12-09T13:32:17,484 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-09T13:32:17,484 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-09T13:32:17,485 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-09T13:32:17,485 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-09T13:32:18,605 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0004_000001 (auth:SIMPLE) from 127.0.0.1:53104 2024-12-09T13:32:18,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742061_1237 (size=349808) 2024-12-09T13:32:18,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742061_1237 (size=349808) 2024-12-09T13:32:18,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742061_1237 (size=349808) 2024-12-09T13:32:20,968 INFO [Socket Reader #1 for port 0 
{}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0004_000001 (auth:SIMPLE) from 127.0.0.1:60806 2024-12-09T13:32:20,974 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0004_000001 (auth:SIMPLE) from 127.0.0.1:43642 2024-12-09T13:32:22,988 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T13:32:26,021 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T13:32:28,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742062_1238 (size=8052) 2024-12-09T13:32:28,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742062_1238 (size=8052) 2024-12-09T13:32:28,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742062_1238 (size=8052) 2024-12-09T13:32:28,454 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_3/usercache/jenkins/appcache/application_1733750974572_0004/container_1733750974572_0004_01_000002/launch_container.sh] 2024-12-09T13:32:28,454 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_3/usercache/jenkins/appcache/application_1733750974572_0004/container_1733750974572_0004_01_000002/container_tokens] 2024-12-09T13:32:28,454 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_3/usercache/jenkins/appcache/application_1733750974572_0004/container_1733750974572_0004_01_000002/sysfs] 2024-12-09T13:32:29,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742064_1240 (size=5568) 2024-12-09T13:32:29,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742064_1240 (size=5568) 2024-12-09T13:32:29,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742064_1240 (size=5568) 2024-12-09T13:32:29,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742063_1239 (size=22163) 2024-12-09T13:32:29,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742063_1239 (size=22163) 2024-12-09T13:32:29,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added 
to blk_1073742063_1239 (size=22163) 2024-12-09T13:32:29,698 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_3/usercache/jenkins/appcache/application_1733750974572_0004/container_1733750974572_0004_01_000003/launch_container.sh] 2024-12-09T13:32:29,698 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_3/usercache/jenkins/appcache/application_1733750974572_0004/container_1733750974572_0004_01_000003/container_tokens] 2024-12-09T13:32:29,698 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_3/usercache/jenkins/appcache/application_1733750974572_0004/container_1733750974572_0004_01_000003/sysfs] 2024-12-09T13:32:30,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742065_1241 (size=466) 2024-12-09T13:32:30,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742065_1241 (size=466) 2024-12-09T13:32:30,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742065_1241 (size=466) 2024-12-09T13:32:30,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742066_1242 (size=22163) 2024-12-09T13:32:30,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742066_1242 (size=22163) 2024-12-09T13:32:30,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742066_1242 (size=22163) 2024-12-09T13:32:30,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742067_1243 (size=349808) 2024-12-09T13:32:30,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742067_1243 (size=349808) 2024-12-09T13:32:30,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742067_1243 (size=349808) 2024-12-09T13:32:30,948 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 628b497be0d454eeaf472abb1f4b3ce4 changed from -1.0 to 0.0, refreshing cache 2024-12-09T13:32:30,948 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region e32b00139b29eb65dd58ba01d37b8470 changed from -1.0 to 0.0, refreshing cache 2024-12-09T13:32:32,110 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-09T13:32:32,110 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and 
integrity. 2024-12-09T13:32:32,117 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemState 2024-12-09T13:32:32,117 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-09T13:32:32,117 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-09T13:32:32,117 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-09T13:32:32,118 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-09T13:32:32,118 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-09T13:32:32,118 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751130273/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751130273/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-09T13:32:32,118 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751130273/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-09T13:32:32,118 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751130273/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-09T13:32:32,124 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-12-09T13:32:32,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=107, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState 2024-12-09T13:32:32,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-09T13:32:32,127 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751152127"}]},"ts":"1733751152127"} 2024-12-09T13:32:32,129 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-12-09T13:32:32,129 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-12-09T13:32:32,129 
INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-12-09T13:32:32,130 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e32b00139b29eb65dd58ba01d37b8470, UNASSIGN}, {pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=628b497be0d454eeaf472abb1f4b3ce4, UNASSIGN}] 2024-12-09T13:32:32,131 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=628b497be0d454eeaf472abb1f4b3ce4, UNASSIGN 2024-12-09T13:32:32,131 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e32b00139b29eb65dd58ba01d37b8470, UNASSIGN 2024-12-09T13:32:32,132 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=628b497be0d454eeaf472abb1f4b3ce4, regionState=CLOSING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:32:32,132 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=e32b00139b29eb65dd58ba01d37b8470, regionState=CLOSING, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:32:32,133 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=628b497be0d454eeaf472abb1f4b3ce4, UNASSIGN because future has completed 2024-12-09T13:32:32,133 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:32:32,133 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure 628b497be0d454eeaf472abb1f4b3ce4, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:32:32,134 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e32b00139b29eb65dd58ba01d37b8470, UNASSIGN because future has completed 2024-12-09T13:32:32,134 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:32:32,134 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=112, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure e32b00139b29eb65dd58ba01d37b8470, server=c48e4b8eb196,44683,1733750968041}] 2024-12-09T13:32:32,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-09T13:32:32,285 INFO 
[RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(122): Close 628b497be0d454eeaf472abb1f4b3ce4 2024-12-09T13:32:32,285 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:32:32,285 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1722): Closing 628b497be0d454eeaf472abb1f4b3ce4, disabling compactions & flushes 2024-12-09T13:32:32,285 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4. 2024-12-09T13:32:32,285 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4. 2024-12-09T13:32:32,285 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4. after waiting 0 ms 2024-12-09T13:32:32,285 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4. 2024-12-09T13:32:32,286 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(122): Close e32b00139b29eb65dd58ba01d37b8470 2024-12-09T13:32:32,286 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:32:32,286 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1722): Closing e32b00139b29eb65dd58ba01d37b8470, disabling compactions & flushes 2024-12-09T13:32:32,286 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470. 2024-12-09T13:32:32,287 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470. 2024-12-09T13:32:32,287 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470. after waiting 0 ms 2024-12-09T13:32:32,287 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470. 
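The export recorded earlier in this run ("Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list", the two "export split=..." records, and "Export Completed") is driven by the ExportSnapshot tool, and the test then checks that the destination holds the snapshot descriptor and manifest (.snapshotinfo and data.manifest). A minimal sketch of the same flow from client code, with the snapshot name and destination taken from this log; treat the exact flags as whatever your HBase version documents:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    String dest = "hdfs://localhost:45819/user/jenkins/test-data/"
        + "6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751130273";

    // Runs the MapReduce-backed export; a zero exit code means success.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemState", "-copy-to", dest });

    // The exported layout should contain the descriptor and manifest
    // files that the test lists above.
    FileSystem fs = new Path(dest).getFileSystem(conf);
    Path snapDir = new Path(dest, ".hbase-snapshot/snaptb0-testExportFileSystemState");
    System.out.println("exit=" + rc
        + " snapshotinfo=" + fs.exists(new Path(snapDir, ".snapshotinfo"))
        + " manifest=" + fs.exists(new Path(snapDir, "data.manifest")));
  }
}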
2024-12-09T13:32:32,290 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/e32b00139b29eb65dd58ba01d37b8470/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T13:32:32,290 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/628b497be0d454eeaf472abb1f4b3ce4/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T13:32:32,290 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:32:32,290 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:32:32,290 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470. 2024-12-09T13:32:32,290 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4. 2024-12-09T13:32:32,290 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1676): Region close journal for e32b00139b29eb65dd58ba01d37b8470: Waiting for close lock at 1733751152286Running coprocessor pre-close hooks at 1733751152286Disabling compacts and flushes for region at 1733751152286Disabling writes for close at 1733751152287 (+1 ms)Writing region close event to WAL at 1733751152287Running coprocessor post-close hooks at 1733751152290 (+3 ms)Closed at 1733751152290 2024-12-09T13:32:32,290 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1676): Region close journal for 628b497be0d454eeaf472abb1f4b3ce4: Waiting for close lock at 1733751152285Running coprocessor pre-close hooks at 1733751152285Disabling compacts and flushes for region at 1733751152285Disabling writes for close at 1733751152285Writing region close event to WAL at 1733751152286 (+1 ms)Running coprocessor post-close hooks at 1733751152290 (+4 ms)Closed at 1733751152290 2024-12-09T13:32:32,292 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(157): Closed e32b00139b29eb65dd58ba01d37b8470 2024-12-09T13:32:32,293 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=e32b00139b29eb65dd58ba01d37b8470, regionState=CLOSED 2024-12-09T13:32:32,293 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(157): Closed 628b497be0d454eeaf472abb1f4b3ce4 2024-12-09T13:32:32,294 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=628b497be0d454eeaf472abb1f4b3ce4, regionState=CLOSED 2024-12-09T13:32:32,295 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure e32b00139b29eb65dd58ba01d37b8470, server=c48e4b8eb196,44683,1733750968041 because future has completed 2024-12-09T13:32:32,295 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure 628b497be0d454eeaf472abb1f4b3ce4, server=c48e4b8eb196,39211,1733750968235 because future has completed 2024-12-09T13:32:32,297 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=112, resume processing ppid=109 2024-12-09T13:32:32,298 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=109, state=SUCCESS, hasLock=false; CloseRegionProcedure e32b00139b29eb65dd58ba01d37b8470, server=c48e4b8eb196,44683,1733750968041 in 161 msec 2024-12-09T13:32:32,299 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=111, resume processing ppid=110 2024-12-09T13:32:32,299 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e32b00139b29eb65dd58ba01d37b8470, UNASSIGN in 167 msec 2024-12-09T13:32:32,299 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=110, state=SUCCESS, hasLock=false; CloseRegionProcedure 628b497be0d454eeaf472abb1f4b3ce4, server=c48e4b8eb196,39211,1733750968235 in 163 msec 2024-12-09T13:32:32,300 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=110, resume processing ppid=108 2024-12-09T13:32:32,300 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=628b497be0d454eeaf472abb1f4b3ce4, UNASSIGN in 169 msec 2024-12-09T13:32:32,302 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=107 2024-12-09T13:32:32,302 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=107, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 172 msec 2024-12-09T13:32:32,303 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751152303"}]},"ts":"1733751152303"} 2024-12-09T13:32:32,305 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-12-09T13:32:32,305 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-12-09T13:32:32,307 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 181 msec 2024-12-09T13:32:32,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-09T13:32:32,443 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-09T13:32:32,443 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-12-09T13:32:32,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-09T13:32:32,445 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-09T13:32:32,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-12-09T13:32:32,445 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=113, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-09T13:32:32,448 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-12-09T13:32:32,449 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/e32b00139b29eb65dd58ba01d37b8470 2024-12-09T13:32:32,449 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/628b497be0d454eeaf472abb1f4b3ce4 2024-12-09T13:32:32,451 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/628b497be0d454eeaf472abb1f4b3ce4/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/628b497be0d454eeaf472abb1f4b3ce4/recovered.edits] 2024-12-09T13:32:32,451 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/e32b00139b29eb65dd58ba01d37b8470/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/e32b00139b29eb65dd58ba01d37b8470/recovered.edits] 2024-12-09T13:32:32,453 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/e32b00139b29eb65dd58ba01d37b8470/cf/082c16b01bbd421ea5c71cfedd72f272 to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportFileSystemState/e32b00139b29eb65dd58ba01d37b8470/cf/082c16b01bbd421ea5c71cfedd72f272 2024-12-09T13:32:32,453 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/628b497be0d454eeaf472abb1f4b3ce4/cf/9ff6c1fe965946349ce74529f3cb8ea5 to 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportFileSystemState/628b497be0d454eeaf472abb1f4b3ce4/cf/9ff6c1fe965946349ce74529f3cb8ea5 2024-12-09T13:32:32,456 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/e32b00139b29eb65dd58ba01d37b8470/recovered.edits/9.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportFileSystemState/e32b00139b29eb65dd58ba01d37b8470/recovered.edits/9.seqid 2024-12-09T13:32:32,456 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/628b497be0d454eeaf472abb1f4b3ce4/recovered.edits/9.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportFileSystemState/628b497be0d454eeaf472abb1f4b3ce4/recovered.edits/9.seqid 2024-12-09T13:32:32,456 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/e32b00139b29eb65dd58ba01d37b8470 2024-12-09T13:32:32,456 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemState/628b497be0d454eeaf472abb1f4b3ce4 2024-12-09T13:32:32,457 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-12-09T13:32:32,458 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=113, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-09T13:32:32,460 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-12-09T13:32:32,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-09T13:32:32,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-09T13:32:32,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-09T13:32:32,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-09T13:32:32,498 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 
2024-12-09T13:32:32,498 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-09T13:32:32,498 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-09T13:32:32,498 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-09T13:32:32,499 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemState' descriptor. 2024-12-09T13:32:32,500 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=113, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-09T13:32:32,500 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states. 2024-12-09T13:32:32,500 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751152500"}]},"ts":"9223372036854775807"} 2024-12-09T13:32:32,500 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751152500"}]},"ts":"9223372036854775807"} 2024-12-09T13:32:32,502 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-09T13:32:32,503 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => e32b00139b29eb65dd58ba01d37b8470, NAME => 'testtb-testExportFileSystemState,,1733751128430.e32b00139b29eb65dd58ba01d37b8470.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 628b497be0d454eeaf472abb1f4b3ce4, NAME => 'testtb-testExportFileSystemState,1,1733751128430.628b497be0d454eeaf472abb1f4b3ce4.', STARTKEY => '1', ENDKEY => ''}] 2024-12-09T13:32:32,503 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted. 
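The DisableTableProcedure and DeleteTableProcedure records above (pid=107 through pid=113), and the "delete name: ... type: DISABLED" snapshot deletions that follow, are the master-side half of the test's ordinary Admin teardown calls. A minimal client-side sketch of that sequence, assuming a reachable cluster and the table and snapshot names from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotTestTeardown {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportFileSystemState");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // DISABLE then DELETE, matching the procedures recorded above.
      if (admin.tableExists(table)) {
        admin.disableTable(table);
        admin.deleteTable(table);
      }
      // Matches the two snapshot deletions recorded by MasterRpcServices.
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemState");
      admin.deleteSnapshot("snaptb0-testExportFileSystemState");
    }
  }
}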
2024-12-09T13:32:32,503 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733751152503"}]},"ts":"9223372036854775807"} 2024-12-09T13:32:32,505 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-12-09T13:32:32,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-09T13:32:32,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:32:32,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-09T13:32:32,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-09T13:32:32,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:32:32,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-09T13:32:32,505 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=113, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-09T13:32:32,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:32:32,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:32:32,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-12-09T13:32:32,507 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 63 msec 2024-12-09T13:32:32,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-12-09T13:32:32,613 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-12-09T13:32:32,613 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-09T13:32:32,619 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-12-09T13:32:32,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-12-09T13:32:32,622 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-12-09T13:32:32,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-12-09T13:32:32,639 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=805 (was 799) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1496153467_1 at /127.0.0.1:51912 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (294847031) connection to localhost/127.0.0.1:36213 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: process reaper (pid 135053) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:40360 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1496153467_1 at /127.0.0.1:54704 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:60798 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3704 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) 
java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:50200 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34919 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36213 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=811 (was 810) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=672 (was 702), ProcessCount=19 (was 19), AvailableMemoryMB=1804 (was 2001) 2024-12-09T13:32:32,639 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-12-09T13:32:32,653 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=805, OpenFileDescriptor=811, MaxFileDescriptor=1048576, SystemLoadAverage=672, ProcessCount=19, AvailableMemoryMB=1804 2024-12-09T13:32:32,653 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-12-09T13:32:32,655 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T13:32:32,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-12-09T13:32:32,656 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T13:32:32,656 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:32:32,656 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 114 2024-12-09T13:32:32,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-09T13:32:32,657 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T13:32:32,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742068_1244 (size=404) 2024-12-09T13:32:32,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742068_1244 (size=404) 2024-12-09T13:32:32,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742068_1244 (size=404) 2024-12-09T13:32:32,665 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 267eb8d1a57e74c58845ab8327206bdd, NAME => 'testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME 
=> 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:32:32,665 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 437fd113967029188173f60a096ceda8, NAME => 'testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:32:32,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742070_1246 (size=65) 2024-12-09T13:32:32,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742069_1245 (size=65) 2024-12-09T13:32:32,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742070_1246 (size=65) 2024-12-09T13:32:32,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742069_1245 (size=65) 2024-12-09T13:32:32,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742070_1246 (size=65) 2024-12-09T13:32:32,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742069_1245 (size=65) 2024-12-09T13:32:32,672 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:32:32,672 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:32:32,672 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing 267eb8d1a57e74c58845ab8327206bdd, disabling compactions & flushes 2024-12-09T13:32:32,672 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing 437fd113967029188173f60a096ceda8, disabling compactions & flushes 2024-12-09T13:32:32,672 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region 
testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd. 2024-12-09T13:32:32,672 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd. 2024-12-09T13:32:32,672 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8. 2024-12-09T13:32:32,672 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd. after waiting 0 ms 2024-12-09T13:32:32,672 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd. 2024-12-09T13:32:32,672 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8. 2024-12-09T13:32:32,672 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd. 2024-12-09T13:32:32,672 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8. after waiting 0 ms 2024-12-09T13:32:32,672 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for 267eb8d1a57e74c58845ab8327206bdd: Waiting for close lock at 1733751152672Disabling compacts and flushes for region at 1733751152672Disabling writes for close at 1733751152672Writing region close event to WAL at 1733751152672Closed at 1733751152672 2024-12-09T13:32:32,672 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8. 2024-12-09T13:32:32,672 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8. 
2024-12-09T13:32:32,672 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for 437fd113967029188173f60a096ceda8: Waiting for close lock at 1733751152672Disabling compacts and flushes for region at 1733751152672Disabling writes for close at 1733751152672Writing region close event to WAL at 1733751152672Closed at 1733751152672 2024-12-09T13:32:32,673 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T13:32:32,673 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733751152673"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733751152673"}]},"ts":"1733751152673"} 2024-12-09T13:32:32,674 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733751152673"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733751152673"}]},"ts":"1733751152673"} 2024-12-09T13:32:32,676 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-09T13:32:32,676 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T13:32:32,676 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751152676"}]},"ts":"1733751152676"} 2024-12-09T13:32:32,678 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-12-09T13:32:32,678 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {c48e4b8eb196=0} racks are {/default-rack=0} 2024-12-09T13:32:32,679 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T13:32:32,679 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T13:32:32,679 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T13:32:32,679 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T13:32:32,679 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T13:32:32,679 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T13:32:32,679 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T13:32:32,679 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T13:32:32,679 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T13:32:32,679 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T13:32:32,679 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=437fd113967029188173f60a096ceda8, ASSIGN}, {pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=267eb8d1a57e74c58845ab8327206bdd, ASSIGN}] 2024-12-09T13:32:32,680 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=267eb8d1a57e74c58845ab8327206bdd, ASSIGN 2024-12-09T13:32:32,680 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=437fd113967029188173f60a096ceda8, ASSIGN 2024-12-09T13:32:32,681 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=437fd113967029188173f60a096ceda8, ASSIGN; state=OFFLINE, location=c48e4b8eb196,44683,1733750968041; forceNewPlan=false, retain=false 2024-12-09T13:32:32,681 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=267eb8d1a57e74c58845ab8327206bdd, ASSIGN; state=OFFLINE, location=c48e4b8eb196,39211,1733750968235; forceNewPlan=false, retain=false 2024-12-09T13:32:32,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-09T13:32:32,832 INFO [c48e4b8eb196:45049 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-09T13:32:32,832 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=437fd113967029188173f60a096ceda8, regionState=OPENING, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:32:32,832 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=267eb8d1a57e74c58845ab8327206bdd, regionState=OPENING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:32:32,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=267eb8d1a57e74c58845ab8327206bdd, ASSIGN because future has completed 2024-12-09T13:32:32,835 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure 267eb8d1a57e74c58845ab8327206bdd, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:32:32,835 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=437fd113967029188173f60a096ceda8, ASSIGN because future has completed 2024-12-09T13:32:32,836 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 437fd113967029188173f60a096ceda8, server=c48e4b8eb196,44683,1733750968041}] 2024-12-09T13:32:32,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-09T13:32:32,990 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd. 2024-12-09T13:32:32,990 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8. 2024-12-09T13:32:32,990 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7752): Opening region: {ENCODED => 267eb8d1a57e74c58845ab8327206bdd, NAME => 'testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T13:32:32,990 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7752): Opening region: {ENCODED => 437fd113967029188173f60a096ceda8, NAME => 'testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T13:32:32,990 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8. service=AccessControlService 2024-12-09T13:32:32,990 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd. 
service=AccessControlService 2024-12-09T13:32:32,990 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T13:32:32,990 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T13:32:32,991 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 437fd113967029188173f60a096ceda8 2024-12-09T13:32:32,991 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 267eb8d1a57e74c58845ab8327206bdd 2024-12-09T13:32:32,991 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:32:32,991 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:32:32,991 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7794): checking encryption for 437fd113967029188173f60a096ceda8 2024-12-09T13:32:32,991 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7794): checking encryption for 267eb8d1a57e74c58845ab8327206bdd 2024-12-09T13:32:32,991 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7797): checking classloading for 437fd113967029188173f60a096ceda8 2024-12-09T13:32:32,991 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7797): checking classloading for 267eb8d1a57e74c58845ab8327206bdd 2024-12-09T13:32:32,992 INFO [StoreOpener-267eb8d1a57e74c58845ab8327206bdd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 267eb8d1a57e74c58845ab8327206bdd 2024-12-09T13:32:32,994 INFO [StoreOpener-267eb8d1a57e74c58845ab8327206bdd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 267eb8d1a57e74c58845ab8327206bdd columnFamilyName cf 2024-12-09T13:32:32,994 DEBUG [StoreOpener-267eb8d1a57e74c58845ab8327206bdd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:32:32,995 INFO [StoreOpener-267eb8d1a57e74c58845ab8327206bdd-1 {}] regionserver.HStore(327): Store=267eb8d1a57e74c58845ab8327206bdd/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:32:32,995 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1038): replaying wal for 267eb8d1a57e74c58845ab8327206bdd 2024-12-09T13:32:32,995 INFO [StoreOpener-437fd113967029188173f60a096ceda8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 437fd113967029188173f60a096ceda8 2024-12-09T13:32:32,996 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/267eb8d1a57e74c58845ab8327206bdd 2024-12-09T13:32:32,996 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/267eb8d1a57e74c58845ab8327206bdd 2024-12-09T13:32:32,996 INFO [StoreOpener-437fd113967029188173f60a096ceda8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 437fd113967029188173f60a096ceda8 columnFamilyName cf 2024-12-09T13:32:32,996 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1048): stopping wal replay for 267eb8d1a57e74c58845ab8327206bdd 2024-12-09T13:32:32,996 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1060): Cleaning up temporary data for 267eb8d1a57e74c58845ab8327206bdd 2024-12-09T13:32:32,996 DEBUG [StoreOpener-437fd113967029188173f60a096ceda8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:32:32,997 INFO 
[StoreOpener-437fd113967029188173f60a096ceda8-1 {}] regionserver.HStore(327): Store=437fd113967029188173f60a096ceda8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:32:32,997 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1038): replaying wal for 437fd113967029188173f60a096ceda8 2024-12-09T13:32:32,998 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/437fd113967029188173f60a096ceda8 2024-12-09T13:32:32,998 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1093): writing seq id for 267eb8d1a57e74c58845ab8327206bdd 2024-12-09T13:32:32,998 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/437fd113967029188173f60a096ceda8 2024-12-09T13:32:32,998 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1048): stopping wal replay for 437fd113967029188173f60a096ceda8 2024-12-09T13:32:32,998 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1060): Cleaning up temporary data for 437fd113967029188173f60a096ceda8 2024-12-09T13:32:33,000 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1093): writing seq id for 437fd113967029188173f60a096ceda8 2024-12-09T13:32:33,000 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/267eb8d1a57e74c58845ab8327206bdd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:32:33,001 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1114): Opened 267eb8d1a57e74c58845ab8327206bdd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73158318, jitterRate=0.09014388918876648}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:32:33,001 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 267eb8d1a57e74c58845ab8327206bdd 2024-12-09T13:32:33,002 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1006): Region open journal for 267eb8d1a57e74c58845ab8327206bdd: Running coprocessor pre-open hook at 1733751152991Writing region info on filesystem at 1733751152991Initializing all the Stores at 1733751152992 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', 
IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751152992Cleaning up temporary data from old regions at 1733751152996 (+4 ms)Running coprocessor post-open hooks at 1733751153001 (+5 ms)Region opened successfully at 1733751153001 2024-12-09T13:32:33,002 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd., pid=117, masterSystemTime=1733751152987 2024-12-09T13:32:33,003 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/437fd113967029188173f60a096ceda8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:32:33,004 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1114): Opened 437fd113967029188173f60a096ceda8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66355014, jitterRate=-0.011233240365982056}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:32:33,004 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 437fd113967029188173f60a096ceda8 2024-12-09T13:32:33,004 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1006): Region open journal for 437fd113967029188173f60a096ceda8: Running coprocessor pre-open hook at 1733751152991Writing region info on filesystem at 1733751152991Initializing all the Stores at 1733751152992 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751152992Cleaning up temporary data from old regions at 1733751152998 (+6 ms)Running coprocessor post-open hooks at 1733751153004 (+6 ms)Region opened successfully at 1733751153004 2024-12-09T13:32:33,004 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd. 2024-12-09T13:32:33,004 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd. 
2024-12-09T13:32:33,005 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8., pid=118, masterSystemTime=1733751152988 2024-12-09T13:32:33,005 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=267eb8d1a57e74c58845ab8327206bdd, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:32:33,006 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8. 2024-12-09T13:32:33,006 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8. 2024-12-09T13:32:33,007 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=117, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure 267eb8d1a57e74c58845ab8327206bdd, server=c48e4b8eb196,39211,1733750968235 because future has completed 2024-12-09T13:32:33,007 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=437fd113967029188173f60a096ceda8, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:32:33,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=118, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 437fd113967029188173f60a096ceda8, server=c48e4b8eb196,44683,1733750968041 because future has completed 2024-12-09T13:32:33,010 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=117, resume processing ppid=116 2024-12-09T13:32:33,010 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, ppid=116, state=SUCCESS, hasLock=false; OpenRegionProcedure 267eb8d1a57e74c58845ab8327206bdd, server=c48e4b8eb196,39211,1733750968235 in 173 msec 2024-12-09T13:32:33,011 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=115 2024-12-09T13:32:33,011 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=267eb8d1a57e74c58845ab8327206bdd, ASSIGN in 331 msec 2024-12-09T13:32:33,011 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure 437fd113967029188173f60a096ceda8, server=c48e4b8eb196,44683,1733750968041 in 174 msec 2024-12-09T13:32:33,013 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=115, resume processing ppid=114 2024-12-09T13:32:33,013 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=437fd113967029188173f60a096ceda8, ASSIGN in 332 msec 2024-12-09T13:32:33,014 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute 
state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T13:32:33,014 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751153014"}]},"ts":"1733751153014"} 2024-12-09T13:32:33,016 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-12-09T13:32:33,016 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T13:32:33,017 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-12-09T13:32:33,020 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-09T13:32:33,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:32:33,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:32:33,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:32:33,039 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:32:33,048 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-09T13:32:33,048 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-09T13:32:33,048 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-09T13:32:33,048 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-09T13:32:33,050 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 393 msec 2024-12-09T13:32:33,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=114 2024-12-09T13:32:33,283 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-09T13:32:33,283 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testConsecutiveExports get assigned. Timeout = 60000ms 2024-12-09T13:32:33,284 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:32:33,288 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testConsecutiveExports assigned to meta. Checking AM states. 2024-12-09T13:32:33,288 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:32:33,288 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testConsecutiveExports assigned. 2024-12-09T13:32:33,288 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-09T13:32:33,291 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-09T13:32:33,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733751153291 (current time:1733751153291). 2024-12-09T13:32:33,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T13:32:33,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-09T13:32:33,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T13:32:33,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40c18a47, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:32:33,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:32:33,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:32:33,293 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:32:33,294 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:32:33,294 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: 
"93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:32:33,294 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b3c3e10, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:32:33,294 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:32:33,294 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:32:33,295 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:32:33,296 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56580, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:32:33,297 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24911b5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:32:33,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:32:33,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:32:33,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:32:33,299 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58054, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:32:33,301 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049. 
2024-12-09T13:32:33,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:32:33,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:32:33,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:32:33,301 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:32:33,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22f8a3f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:32:33,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:32:33,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:32:33,303 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:32:33,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:32:33,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:32:33,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7da16eac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:32:33,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:32:33,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:32:33,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:32:33,304 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56596, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:32:33,304 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f2eb0b2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:32:33,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:32:33,305 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:32:33,305 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:32:33,306 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58070, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:32:33,307 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:32:33,308 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049. 
2024-12-09T13:32:33,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:32:33,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:32:33,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:32:33,309 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:32:33,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-09T13:32:33,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
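The records above trace the master validating a snapshot request (creation time, TTL, VERSION, owner, table ACLs) before storing a SnapshotProcedure. As a hedged, illustrative sketch only, assuming the stock HBase client API rather than this test's own helpers, the client side of such a FLUSH-type snapshot request looks roughly like this:

// Illustrative sketch (not this test's source): requesting a FLUSH snapshot through
// the public Admin API, which drives the MasterRpcServices.snapshot() path logged above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The master fills in creation time, TTL, VERSION=2 and the owner, reads the
      // table ACLs, then runs a SnapshotProcedure with one SnapshotRegionProcedure
      // per region, as the records above show.
      admin.snapshot("emptySnaptb0-testConsecutiveExports",
          TableName.valueOf("testtb-testConsecutiveExports"),
          SnapshotType.FLUSH);
    }
  }
}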
2024-12-09T13:32:33,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-09T13:32:33,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-09T13:32:33,311 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T13:32:33,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-09T13:32:33,312 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T13:32:33,313 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T13:32:33,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742071_1247 (size=161) 2024-12-09T13:32:33,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742071_1247 (size=161) 2024-12-09T13:32:33,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742071_1247 (size=161) 2024-12-09T13:32:33,320 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T13:32:33,320 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 437fd113967029188173f60a096ceda8}, {pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 267eb8d1a57e74c58845ab8327206bdd}] 2024-12-09T13:32:33,321 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 437fd113967029188173f60a096ceda8 2024-12-09T13:32:33,321 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 267eb8d1a57e74c58845ab8327206bdd 2024-12-09T13:32:33,412 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-09T13:32:33,473 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39211 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=121 2024-12-09T13:32:33,473 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44683 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=120 2024-12-09T13:32:33,474 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8. 2024-12-09T13:32:33,474 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd. 2024-12-09T13:32:33,474 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.HRegion(2603): Flush status journal for 437fd113967029188173f60a096ceda8: 2024-12-09T13:32:33,474 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.HRegion(2603): Flush status journal for 267eb8d1a57e74c58845ab8327206bdd: 2024-12-09T13:32:33,474 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8. for emptySnaptb0-testConsecutiveExports completed. 2024-12-09T13:32:33,474 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd. for emptySnaptb0-testConsecutiveExports completed. 2024-12-09T13:32:33,475 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-09T13:32:33,475 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-09T13:32:33,475 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:32:33,475 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:32:33,475 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T13:32:33,475 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T13:32:33,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742073_1249 (size=68) 2024-12-09T13:32:33,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742072_1248 (size=68) 2024-12-09T13:32:33,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742073_1249 (size=68) 2024-12-09T13:32:33,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742073_1249 (size=68) 2024-12-09T13:32:33,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742072_1248 (size=68) 2024-12-09T13:32:33,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742072_1248 (size=68) 2024-12-09T13:32:33,487 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd. 2024-12-09T13:32:33,487 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-09T13:32:33,487 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8. 
2024-12-09T13:32:33,487 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=120 2024-12-09T13:32:33,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=120 2024-12-09T13:32:33,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=121 2024-12-09T13:32:33,488 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 437fd113967029188173f60a096ceda8 2024-12-09T13:32:33,488 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 267eb8d1a57e74c58845ab8327206bdd 2024-12-09T13:32:33,488 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 267eb8d1a57e74c58845ab8327206bdd 2024-12-09T13:32:33,488 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 437fd113967029188173f60a096ceda8 2024-12-09T13:32:33,490 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 437fd113967029188173f60a096ceda8 in 169 msec 2024-12-09T13:32:33,491 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=121, resume processing ppid=119 2024-12-09T13:32:33,491 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 267eb8d1a57e74c58845ab8327206bdd in 169 msec 2024-12-09T13:32:33,491 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T13:32:33,492 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T13:32:33,492 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T13:32:33,492 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-12-09T13:32:33,493 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-12-09T13:32:33,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39425 is added to blk_1073742074_1250 (size=543) 2024-12-09T13:32:33,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742074_1250 (size=543) 2024-12-09T13:32:33,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742074_1250 (size=543) 2024-12-09T13:32:33,511 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T13:32:33,514 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T13:32:33,515 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-12-09T13:32:33,516 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T13:32:33,516 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-09T13:32:33,517 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 207 msec 2024-12-09T13:32:33,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-09T13:32:33,623 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-09T13:32:33,628 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='01903e09787e1aa3831326b3f88ac1bfd', locateType=CURRENT is [region=testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8., hostname=c48e4b8eb196,44683,1733750968041, seqNum=2] 2024-12-09T13:32:33,629 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='1ee0f4c26cc8220acc3ff5da21e972ea8', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 
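Among the records above, the client is notified "Operation: SNAPSHOT ... completed" once the procedure finishes. A minimal sketch, assuming the standard Admin API (not this test's utilities), of confirming the finished snapshot from a client:

import java.util.List;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
  // Prints every completed snapshot the master knows about; the one taken above
  // should appear as "emptySnaptb0-testConsecutiveExports".
  static void printSnapshots(Admin admin) throws java.io.IOException {
    List<SnapshotDescription> snapshots = admin.listSnapshots();
    for (SnapshotDescription sd : snapshots) {
      System.out.println(sd.getName() + " on table " + sd.getTableName());
    }
  }
}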
2024-12-09T13:32:33,631 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='24fd0e8760025cf88ec9585d12131365f', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:32:33,631 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='344650330f7cec0bfa5e3bb0200ea22ba', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:32:33,632 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='4637275920997c065658fb7ed6a625043', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:32:33,638 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T13:32:33,641 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39211 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T13:32:33,642 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-09T13:32:33,644 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-12-09T13:32:33,644 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8. 2024-12-09T13:32:33,645 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:32:33,646 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-09T13:32:33,650 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-09T13:32:33,654 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-09T13:32:33,656 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-09T13:32:33,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733751153656 (current time:1733751153656). 
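The two "writing data to region ... with WAL disabled" records above come from the test loading rows before the second snapshot. A rough sketch, assuming plain client Puts with SKIP_WAL durability rather than whatever loader the test actually uses (the cf:q column matches the keys seen later in the flush records):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class SkipWalPutSketch {
  // Writes one cell to cf:q while skipping the WAL, which makes the region server
  // log the "Data may be lost in the event of a crash" warning seen above.
  static void putWithoutWal(Connection conn, byte[] row, byte[] value) throws IOException {
    try (Table table = conn.getTable(TableName.valueOf("testtb-testConsecutiveExports"))) {
      Put put = new Put(row);
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), value);
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}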
2024-12-09T13:32:33,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T13:32:33,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-09T13:32:33,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T13:32:33,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bb1b12f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:32:33,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:32:33,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:32:33,657 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:32:33,657 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:32:33,657 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:32:33,658 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25140c67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:32:33,658 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:32:33,658 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:32:33,658 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:32:33,658 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56622, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:32:33,659 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41aa85d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:32:33,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:32:33,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:32:33,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:32:33,661 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58082, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:32:33,662 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049. 2024-12-09T13:32:33,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:32:33,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:32:33,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:32:33,662 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T13:32:33,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bbce941, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:32:33,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:32:33,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:32:33,663 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:32:33,663 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:32:33,663 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:32:33,664 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59bda4b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:32:33,664 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:32:33,664 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:32:33,664 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:32:33,664 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56650, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:32:33,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d94840f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:32:33,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:32:33,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:32:33,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:32:33,667 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58092, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-09T13:32:33,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:32:33,670 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049. 2024-12-09T13:32:33,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:32:33,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:32:33,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:32:33,671 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T13:32:33,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-09T13:32:33,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T13:32:33,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-09T13:32:33,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-09T13:32:33,674 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T13:32:33,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-09T13:32:33,675 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T13:32:33,678 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T13:32:33,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742075_1251 (size=156) 2024-12-09T13:32:33,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742075_1251 (size=156) 2024-12-09T13:32:33,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742075_1251 (size=156) 2024-12-09T13:32:33,692 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T13:32:33,692 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 437fd113967029188173f60a096ceda8}, {pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 267eb8d1a57e74c58845ab8327206bdd}] 2024-12-09T13:32:33,693 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, 
hasLock=false; SnapshotRegionProcedure 437fd113967029188173f60a096ceda8 2024-12-09T13:32:33,693 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 267eb8d1a57e74c58845ab8327206bdd 2024-12-09T13:32:33,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-09T13:32:33,846 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44683 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-12-09T13:32:33,846 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39211 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=124 2024-12-09T13:32:33,846 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd. 2024-12-09T13:32:33,846 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8. 2024-12-09T13:32:33,846 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2902): Flushing 437fd113967029188173f60a096ceda8 1/1 column families, dataSize=65 B heapSize=400 B 2024-12-09T13:32:33,846 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2902): Flushing 267eb8d1a57e74c58845ab8327206bdd 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-12-09T13:32:33,866 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/267eb8d1a57e74c58845ab8327206bdd/.tmp/cf/12661ec6d2ae4e47ba7f6eff6bd8fbeb is 71, key is 127a341cb18b2e414db2057492fb5697/cf:q/1733751153641/Put/seqid=0 2024-12-09T13:32:33,866 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/437fd113967029188173f60a096ceda8/.tmp/cf/53eee90d0d0a4040bb582c25d9f7f1eb is 69, key is 01903e09787e1aa3831326b3f88ac1bfd/cf:q/1733751153638/Put/seqid=0 2024-12-09T13:32:33,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742077_1253 (size=5149) 2024-12-09T13:32:33,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742077_1253 (size=5149) 2024-12-09T13:32:33,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742077_1253 (size=5149) 2024-12-09T13:32:33,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is 
added to blk_1073742076_1252 (size=8460) 2024-12-09T13:32:33,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742076_1252 (size=8460) 2024-12-09T13:32:33,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742076_1252 (size=8460) 2024-12-09T13:32:33,872 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/437fd113967029188173f60a096ceda8/.tmp/cf/53eee90d0d0a4040bb582c25d9f7f1eb 2024-12-09T13:32:33,873 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.19 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/267eb8d1a57e74c58845ab8327206bdd/.tmp/cf/12661ec6d2ae4e47ba7f6eff6bd8fbeb 2024-12-09T13:32:33,876 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/267eb8d1a57e74c58845ab8327206bdd/.tmp/cf/12661ec6d2ae4e47ba7f6eff6bd8fbeb as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/267eb8d1a57e74c58845ab8327206bdd/cf/12661ec6d2ae4e47ba7f6eff6bd8fbeb 2024-12-09T13:32:33,876 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/437fd113967029188173f60a096ceda8/.tmp/cf/53eee90d0d0a4040bb582c25d9f7f1eb as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/437fd113967029188173f60a096ceda8/cf/53eee90d0d0a4040bb582c25d9f7f1eb 2024-12-09T13:32:33,880 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/437fd113967029188173f60a096ceda8/cf/53eee90d0d0a4040bb582c25d9f7f1eb, entries=1, sequenceid=6, filesize=5.0 K 2024-12-09T13:32:33,880 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/267eb8d1a57e74c58845ab8327206bdd/cf/12661ec6d2ae4e47ba7f6eff6bd8fbeb, entries=49, sequenceid=6, filesize=8.3 K 2024-12-09T13:32:33,881 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(3140): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for 267eb8d1a57e74c58845ab8327206bdd in 35ms, sequenceid=6, compaction 
requested=false 2024-12-09T13:32:33,881 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(3140): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for 437fd113967029188173f60a096ceda8 in 35ms, sequenceid=6, compaction requested=false 2024-12-09T13:32:33,881 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-09T13:32:33,881 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-09T13:32:33,882 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2603): Flush status journal for 267eb8d1a57e74c58845ab8327206bdd: 2024-12-09T13:32:33,882 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2603): Flush status journal for 437fd113967029188173f60a096ceda8: 2024-12-09T13:32:33,882 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd. for snaptb0-testConsecutiveExports completed. 2024-12-09T13:32:33,882 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8. for snaptb0-testConsecutiveExports completed. 2024-12-09T13:32:33,882 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-09T13:32:33,882 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-09T13:32:33,882 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:32:33,882 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:32:33,882 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/437fd113967029188173f60a096ceda8/cf/53eee90d0d0a4040bb582c25d9f7f1eb] hfiles 2024-12-09T13:32:33,882 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/267eb8d1a57e74c58845ab8327206bdd/cf/12661ec6d2ae4e47ba7f6eff6bd8fbeb] hfiles 2024-12-09T13:32:33,882 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/437fd113967029188173f60a096ceda8/cf/53eee90d0d0a4040bb582c25d9f7f1eb for snapshot=snaptb0-testConsecutiveExports 2024-12-09T13:32:33,882 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/267eb8d1a57e74c58845ab8327206bdd/cf/12661ec6d2ae4e47ba7f6eff6bd8fbeb for snapshot=snaptb0-testConsecutiveExports 2024-12-09T13:32:33,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742078_1254 (size=107) 2024-12-09T13:32:33,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742078_1254 (size=107) 2024-12-09T13:32:33,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742078_1254 (size=107) 2024-12-09T13:32:33,888 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8. 
2024-12-09T13:32:33,888 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-09T13:32:33,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=123 2024-12-09T13:32:33,889 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 437fd113967029188173f60a096ceda8 2024-12-09T13:32:33,889 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 437fd113967029188173f60a096ceda8 2024-12-09T13:32:33,891 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 437fd113967029188173f60a096ceda8 in 198 msec 2024-12-09T13:32:33,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742079_1255 (size=107) 2024-12-09T13:32:33,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742079_1255 (size=107) 2024-12-09T13:32:33,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742079_1255 (size=107) 2024-12-09T13:32:33,893 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd. 
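
The entries above cover the region-level work for the FLUSH-type snapshot: each SnapshotRegionProcedure flushes its region's memstore, records the resulting hfile in the snapshot manifest, and reports completion back to the master. For orientation, a minimal Java sketch of how a client typically requests such a snapshot through the Admin API (the snapshot and table names come from the log; the call itself is an assumption about the test setup, not something shown in these lines):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class FlushSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A FLUSH snapshot flushes every region's memstore before taking hfile references,
      // which is what the "Flushing ..." and DefaultStoreFlusher entries above record.
      admin.snapshot(new SnapshotDescription(
          "snaptb0-testConsecutiveExports",
          TableName.valueOf("testtb-testConsecutiveExports"),
          SnapshotType.FLUSH));
    }
  }
}

The blocking snapshot() call returns once the master-side SnapshotProcedure (pid=122 here) reaches SUCCESS, which is what the "Checking to see if procedure is done pid=122" polling below reflects.
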
2024-12-09T13:32:33,893 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=124
2024-12-09T13:32:33,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=124
2024-12-09T13:32:33,893 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 267eb8d1a57e74c58845ab8327206bdd
2024-12-09T13:32:33,893 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 267eb8d1a57e74c58845ab8327206bdd
2024-12-09T13:32:33,895 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=124, resume processing ppid=122
2024-12-09T13:32:33,895 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 267eb8d1a57e74c58845ab8327206bdd in 202 msec
2024-12-09T13:32:33,895 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-09T13:32:33,896 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-09T13:32:33,896 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-09T13:32:33,896 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports
2024-12-09T13:32:33,897 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports
2024-12-09T13:32:33,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742080_1256 (size=621)
2024-12-09T13:32:33,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742080_1256 (size=621)
2024-12-09T13:32:33,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742080_1256 (size=621)
2024-12-09T13:32:33,909 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-09T13:32:33,913 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-09T13:32:33,913 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testConsecutiveExports
2024-12-09T13:32:33,915 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION
2024-12-09T13:32:33,915 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122
2024-12-09T13:32:33,916 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 243 msec
2024-12-09T13:32:33,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122
2024-12-09T13:32:33,994 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed
2024-12-09T13:32:33,994 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751153994
2024-12-09T13:32:33,995 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751153994, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751153994, srcFsUri=hdfs://localhost:45819, srcDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7
2024-12-09T13:32:34,025 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:45819, inputRoot=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7
2024-12-09T13:32:34,025 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@34003054, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751153994, skipTmp=false,
initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751153994/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-09T13:32:34,027 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-09T13:32:34,030 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751153994/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-09T13:32:34,049 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:34,049 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:34,049 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:34,868 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop-3693822074932472801.jar 2024-12-09T13:32:34,869 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:34,869 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:34,922 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop-14513061101323802961.jar 2024-12-09T13:32:34,922 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:34,922 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:34,923 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:34,923 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:34,923 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:34,923 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:34,923 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-09T13:32:34,924 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-09T13:32:34,924 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-09T13:32:34,924 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-09T13:32:34,924 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-09T13:32:34,925 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-09T13:32:34,925 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-09T13:32:34,925 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-09T13:32:34,925 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-09T13:32:34,925 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-09T13:32:34,926 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-09T13:32:34,926 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:32:34,926 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:32:34,926 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:32:34,926 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:32:34,927 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:32:34,927 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:32:34,927 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:32:34,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742081_1257 (size=443172) 2024-12-09T13:32:34,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to 
blk_1073742081_1257 (size=443172) 2024-12-09T13:32:34,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742081_1257 (size=443172) 2024-12-09T13:32:34,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742082_1258 (size=5175431) 2024-12-09T13:32:34,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742082_1258 (size=5175431) 2024-12-09T13:32:34,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742082_1258 (size=5175431) 2024-12-09T13:32:35,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742083_1259 (size=1877034) 2024-12-09T13:32:35,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742083_1259 (size=1877034) 2024-12-09T13:32:35,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742083_1259 (size=1877034) 2024-12-09T13:32:35,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742084_1260 (size=20406) 2024-12-09T13:32:35,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742084_1260 (size=20406) 2024-12-09T13:32:35,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742084_1260 (size=20406) 2024-12-09T13:32:35,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742085_1261 (size=24096) 2024-12-09T13:32:35,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742085_1261 (size=24096) 2024-12-09T13:32:35,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742085_1261 (size=24096) 2024-12-09T13:32:35,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742086_1262 (size=503880) 2024-12-09T13:32:35,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742086_1262 (size=503880) 2024-12-09T13:32:35,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742086_1262 (size=503880) 2024-12-09T13:32:35,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742087_1263 (size=4695811) 2024-12-09T13:32:35,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742087_1263 (size=4695811) 2024-12-09T13:32:35,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742087_1263 (size=4695811) 2024-12-09T13:32:35,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is 
added to blk_1073742088_1264 (size=111872) 2024-12-09T13:32:35,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742088_1264 (size=111872) 2024-12-09T13:32:35,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742088_1264 (size=111872) 2024-12-09T13:32:35,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742089_1265 (size=1597213) 2024-12-09T13:32:35,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742089_1265 (size=1597213) 2024-12-09T13:32:35,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742089_1265 (size=1597213) 2024-12-09T13:32:35,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742090_1266 (size=4188619) 2024-12-09T13:32:35,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742090_1266 (size=4188619) 2024-12-09T13:32:35,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742090_1266 (size=4188619) 2024-12-09T13:32:35,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742091_1267 (size=127628) 2024-12-09T13:32:35,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742091_1267 (size=127628) 2024-12-09T13:32:35,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742091_1267 (size=127628) 2024-12-09T13:32:35,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742092_1268 (size=29229) 2024-12-09T13:32:35,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742092_1268 (size=29229) 2024-12-09T13:32:35,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742092_1268 (size=29229) 2024-12-09T13:32:35,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742093_1269 (size=45609) 2024-12-09T13:32:35,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742093_1269 (size=45609) 2024-12-09T13:32:35,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742093_1269 (size=45609) 2024-12-09T13:32:35,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742094_1270 (size=8360360) 2024-12-09T13:32:35,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742094_1270 (size=8360360) 2024-12-09T13:32:35,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39425 is added to blk_1073742094_1270 (size=8360360) 2024-12-09T13:32:35,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742095_1271 (size=1323991) 2024-12-09T13:32:35,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742095_1271 (size=1323991) 2024-12-09T13:32:35,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742095_1271 (size=1323991) 2024-12-09T13:32:35,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742096_1272 (size=232957) 2024-12-09T13:32:35,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742096_1272 (size=232957) 2024-12-09T13:32:35,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742096_1272 (size=232957) 2024-12-09T13:32:35,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742097_1273 (size=217634) 2024-12-09T13:32:35,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742097_1273 (size=217634) 2024-12-09T13:32:35,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742097_1273 (size=217634) 2024-12-09T13:32:35,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742098_1274 (size=903930) 2024-12-09T13:32:35,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742098_1274 (size=903930) 2024-12-09T13:32:35,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742098_1274 (size=903930) 2024-12-09T13:32:35,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742099_1275 (size=77835) 2024-12-09T13:32:35,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742099_1275 (size=77835) 2024-12-09T13:32:35,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742099_1275 (size=77835) 2024-12-09T13:32:35,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742100_1276 (size=6425017) 2024-12-09T13:32:35,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742100_1276 (size=6425017) 2024-12-09T13:32:35,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742100_1276 (size=6425017) 2024-12-09T13:32:35,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742101_1277 (size=1832290) 2024-12-09T13:32:35,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42277 is added to blk_1073742101_1277 (size=1832290) 2024-12-09T13:32:35,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742101_1277 (size=1832290) 2024-12-09T13:32:35,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742102_1278 (size=30949) 2024-12-09T13:32:35,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742102_1278 (size=30949) 2024-12-09T13:32:35,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742102_1278 (size=30949) 2024-12-09T13:32:35,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742103_1279 (size=136454) 2024-12-09T13:32:35,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742103_1279 (size=136454) 2024-12-09T13:32:35,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742103_1279 (size=136454) 2024-12-09T13:32:35,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742104_1280 (size=322274) 2024-12-09T13:32:35,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742104_1280 (size=322274) 2024-12-09T13:32:35,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742104_1280 (size=322274) 2024-12-09T13:32:35,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742105_1281 (size=131440) 2024-12-09T13:32:35,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742105_1281 (size=131440) 2024-12-09T13:32:35,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742105_1281 (size=131440) 2024-12-09T13:32:35,230 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
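
The long run of "For class ..., using jar ..." entries above is the dependency-jar resolution step of setting up the export's MapReduce job: each class the job needs is mapped to its containing jar so that jar can be shipped with the job, and the JobResourceUploader warning only notes that the test submits the job without a dedicated job jar. A small sketch of the kind of call that produces this output when wiring an HBase MapReduce job (the job name is a made-up placeholder):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-snapshot-sketch"); // hypothetical job name
    // Finds the jar containing each required class and adds it to the job's
    // distributed cache; this is what logs the "For class X, using jar Y" lines.
    TableMapReduceUtil.addDependencyJars(job);
  }
}
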
2024-12-09T13:32:35,232 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-09T13:32:35,233 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.3 K 2024-12-09T13:32:35,233 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.0 K 2024-12-09T13:32:35,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742106_1282 (size=441) 2024-12-09T13:32:35,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742106_1282 (size=441) 2024-12-09T13:32:35,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742106_1282 (size=441) 2024-12-09T13:32:35,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742107_1283 (size=21) 2024-12-09T13:32:35,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742107_1283 (size=21) 2024-12-09T13:32:35,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742107_1283 (size=21) 2024-12-09T13:32:35,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742108_1284 (size=304151) 2024-12-09T13:32:35,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742108_1284 (size=304151) 2024-12-09T13:32:35,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742108_1284 (size=304151) 2024-12-09T13:32:36,179 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T13:32:36,179 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-09T13:32:36,182 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0004_000001 (auth:SIMPLE) from 127.0.0.1:37992 2024-12-09T13:32:36,194 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_0/usercache/jenkins/appcache/application_1733750974572_0004/container_1733750974572_0004_01_000001/launch_container.sh] 2024-12-09T13:32:36,194 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_0/usercache/jenkins/appcache/application_1733750974572_0004/container_1733750974572_0004_01_000001/container_tokens] 2024-12-09T13:32:36,194 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_0/usercache/jenkins/appcache/application_1733750974572_0004/container_1733750974572_0004_01_000001/sysfs] 2024-12-09T13:32:36,689 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0005_000001 (auth:SIMPLE) from 127.0.0.1:39962 2024-12-09T13:32:37,843 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T13:32:37,881 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-09T13:32:37,881 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-12-09T13:32:37,882 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-09T13:32:41,732 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0005_000001 (auth:SIMPLE) from 127.0.0.1:49384 2024-12-09T13:32:41,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742109_1285 (size=349849) 2024-12-09T13:32:41,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742109_1285 (size=349849) 2024-12-09T13:32:41,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742109_1285 (size=349849) 2024-12-09T13:32:43,384 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T13:32:44,090 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): 
Auth successful for appattempt_1733750974572_0005_000001 (auth:SIMPLE) from 127.0.0.1:38962 2024-12-09T13:32:44,096 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0005_000001 (auth:SIMPLE) from 127.0.0.1:36224 2024-12-09T13:32:50,705 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_1/usercache/jenkins/appcache/application_1733750974572_0005/container_1733750974572_0005_01_000003/launch_container.sh] 2024-12-09T13:32:50,705 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_1/usercache/jenkins/appcache/application_1733750974572_0005/container_1733750974572_0005_01_000003/container_tokens] 2024-12-09T13:32:50,705 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_1/usercache/jenkins/appcache/application_1733750974572_0005/container_1733750974572_0005_01_000003/sysfs] 2024-12-09T13:32:51,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742110_1286 (size=22223) 2024-12-09T13:32:51,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742110_1286 (size=22223) 2024-12-09T13:32:51,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742110_1286 (size=22223) 2024-12-09T13:32:51,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742111_1287 (size=463) 2024-12-09T13:32:51,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742111_1287 (size=463) 2024-12-09T13:32:51,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742111_1287 (size=463) 2024-12-09T13:32:52,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742112_1288 (size=22223) 2024-12-09T13:32:52,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742112_1288 (size=22223) 2024-12-09T13:32:52,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742112_1288 (size=22223) 2024-12-09T13:32:52,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742113_1289 (size=349849) 2024-12-09T13:32:52,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742113_1289 (size=349849) 2024-12-09T13:32:52,037 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742113_1289 (size=349849) 2024-12-09T13:32:52,049 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0005_000001 (auth:SIMPLE) from 127.0.0.1:37676 2024-12-09T13:32:52,060 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733750974572_0005_01_000002 is : 143 2024-12-09T13:32:52,068 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_2/usercache/jenkins/appcache/application_1733750974572_0005/container_1733750974572_0005_01_000002/launch_container.sh] 2024-12-09T13:32:52,068 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_2/usercache/jenkins/appcache/application_1733750974572_0005/container_1733750974572_0005_01_000002/container_tokens] 2024-12-09T13:32:52,068 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_2/usercache/jenkins/appcache/application_1733750974572_0005/container_1733750974572_0005_01_000002/sysfs] 2024-12-09T13:32:53,444 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-09T13:32:53,444 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
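
At this point the first export's map tasks have finished and ExportSnapshot finalizes and verifies the copied snapshot; the entries that follow list the .snapshotinfo and data.manifest files on both the HDFS source and the local target, and then immediately start a second export to the same local directory, which is the "consecutive exports" scenario under test. A minimal sketch of driving the same tool programmatically; -snapshot and -copy-to are the tool's documented options, while the local target path below is a placeholder, not the test's actual directory:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Equivalent CLI form:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //       -snapshot snaptb0-testConsecutiveExports -copy-to file:///tmp/local-export
    Configuration conf = HBaseConfiguration.create();
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testConsecutiveExports",
        "-copy-to", "file:///tmp/local-export" // placeholder target path
    });
    System.exit(rc);
  }
}

Re-exporting into a directory that already holds the snapshot normally requires the tool's -overwrite option; whether the test passes it is not visible in these lines.
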
2024-12-09T13:32:53,447 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports
2024-12-09T13:32:53,447 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot
2024-12-09T13:32:53,447 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state
2024-12-09T13:32:53,447 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testConsecutiveExports
2024-12-09T13:32:53,449 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo
2024-12-09T13:32:53,449 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest
2024-12-09T13:32:53,449 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@34003054 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751153994/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751153994/.hbase-snapshot/snaptb0-testConsecutiveExports
2024-12-09T13:32:53,449 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751153994/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo
2024-12-09T13:32:53,449 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751153994/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest
2024-12-09T13:32:53,451 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751153994, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751153994, srcFsUri=hdfs://localhost:45819, srcDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7
2024-12-09T13:32:53,491 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:45819, inputRoot=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7
2024-12-09T13:32:53,492 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@34003054,
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751153994, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751153994/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-09T13:32:53,493 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-09T13:32:53,497 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751153994/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-09T13:32:53,516 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:53,516 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:53,517 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:54,351 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop-12020221119346067807.jar 2024-12-09T13:32:54,351 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:54,351 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:54,403 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop-13441177735392374296.jar 2024-12-09T13:32:54,403 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:54,403 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:54,403 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:54,403 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:54,404 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:54,404 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:32:54,404 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-09T13:32:54,404 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-09T13:32:54,404 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-09T13:32:54,404 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-09T13:32:54,405 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-09T13:32:54,405 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-09T13:32:54,405 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-09T13:32:54,405 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-09T13:32:54,405 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-09T13:32:54,405 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-09T13:32:54,405 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-09T13:32:54,406 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:32:54,406 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:32:54,406 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:32:54,406 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:32:54,406 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:32:54,406 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:32:54,407 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:32:54,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742114_1290 (size=5175431) 2024-12-09T13:32:54,459 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742114_1290 (size=5175431) 2024-12-09T13:32:54,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742114_1290 (size=5175431) 2024-12-09T13:32:54,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742115_1291 (size=1877034) 2024-12-09T13:32:54,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742115_1291 (size=1877034) 2024-12-09T13:32:54,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742115_1291 (size=1877034) 2024-12-09T13:32:54,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742116_1292 (size=20406) 2024-12-09T13:32:54,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742116_1292 (size=20406) 2024-12-09T13:32:54,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742116_1292 (size=20406) 2024-12-09T13:32:54,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742117_1293 (size=6425017) 2024-12-09T13:32:54,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742117_1293 (size=6425017) 2024-12-09T13:32:54,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742117_1293 (size=6425017) 2024-12-09T13:32:54,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742118_1294 (size=24096) 2024-12-09T13:32:54,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742118_1294 (size=24096) 2024-12-09T13:32:54,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742118_1294 (size=24096) 2024-12-09T13:32:54,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742119_1295 (size=503880) 2024-12-09T13:32:54,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742119_1295 (size=503880) 2024-12-09T13:32:54,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742119_1295 (size=503880) 2024-12-09T13:32:54,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742120_1296 (size=4695811) 2024-12-09T13:32:54,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742120_1296 (size=4695811) 2024-12-09T13:32:54,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742120_1296 (size=4695811) 2024-12-09T13:32:54,538 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742121_1297 (size=111872) 2024-12-09T13:32:54,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742121_1297 (size=111872) 2024-12-09T13:32:54,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742121_1297 (size=111872) 2024-12-09T13:32:54,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742122_1298 (size=1597213) 2024-12-09T13:32:54,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742122_1298 (size=1597213) 2024-12-09T13:32:54,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742122_1298 (size=1597213) 2024-12-09T13:32:54,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742123_1299 (size=4188619) 2024-12-09T13:32:54,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742123_1299 (size=4188619) 2024-12-09T13:32:54,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742123_1299 (size=4188619) 2024-12-09T13:32:54,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742124_1300 (size=127628) 2024-12-09T13:32:54,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742124_1300 (size=127628) 2024-12-09T13:32:54,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742124_1300 (size=127628) 2024-12-09T13:32:54,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742125_1301 (size=29229) 2024-12-09T13:32:54,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742125_1301 (size=29229) 2024-12-09T13:32:54,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742125_1301 (size=29229) 2024-12-09T13:32:54,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742126_1302 (size=45609) 2024-12-09T13:32:54,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742126_1302 (size=45609) 2024-12-09T13:32:54,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742126_1302 (size=45609) 2024-12-09T13:32:54,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742127_1303 (size=8360360) 2024-12-09T13:32:54,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742127_1303 (size=8360360) 2024-12-09T13:32:54,605 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742127_1303 (size=8360360) 2024-12-09T13:32:54,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742128_1304 (size=1323991) 2024-12-09T13:32:54,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742128_1304 (size=1323991) 2024-12-09T13:32:54,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742128_1304 (size=1323991) 2024-12-09T13:32:54,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742129_1305 (size=232957) 2024-12-09T13:32:54,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742129_1305 (size=232957) 2024-12-09T13:32:54,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742129_1305 (size=232957) 2024-12-09T13:32:54,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742130_1306 (size=217634) 2024-12-09T13:32:54,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742130_1306 (size=217634) 2024-12-09T13:32:54,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742130_1306 (size=217634) 2024-12-09T13:32:54,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742131_1307 (size=903930) 2024-12-09T13:32:54,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742131_1307 (size=903930) 2024-12-09T13:32:54,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742131_1307 (size=903930) 2024-12-09T13:32:54,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742132_1308 (size=77835) 2024-12-09T13:32:54,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742132_1308 (size=77835) 2024-12-09T13:32:54,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742132_1308 (size=77835) 2024-12-09T13:32:54,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742133_1309 (size=443172) 2024-12-09T13:32:54,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742133_1309 (size=443172) 2024-12-09T13:32:54,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742133_1309 (size=443172) 2024-12-09T13:32:54,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742134_1310 (size=1832290) 2024-12-09T13:32:54,657 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742134_1310 (size=1832290) 2024-12-09T13:32:54,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742134_1310 (size=1832290) 2024-12-09T13:32:54,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742135_1311 (size=30949) 2024-12-09T13:32:54,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742135_1311 (size=30949) 2024-12-09T13:32:54,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742135_1311 (size=30949) 2024-12-09T13:32:54,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742136_1312 (size=136454) 2024-12-09T13:32:54,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742136_1312 (size=136454) 2024-12-09T13:32:54,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742136_1312 (size=136454) 2024-12-09T13:32:54,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742137_1313 (size=322274) 2024-12-09T13:32:54,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742137_1313 (size=322274) 2024-12-09T13:32:54,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742137_1313 (size=322274) 2024-12-09T13:32:54,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742138_1314 (size=131440) 2024-12-09T13:32:54,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742138_1314 (size=131440) 2024-12-09T13:32:54,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742138_1314 (size=131440) 2024-12-09T13:32:54,689 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
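Note on the JobResourceUploader warning just above ("No job jar file set. User classes may not be found."): the TableMapReduceUtil(972) entries show HBase dependency jars being shipped with the job, but the job jar itself still has to be set by the driver. A minimal, hedged sketch in Java of how that is typically done; the class name and job name below are illustrative and not taken from the test:

// Hedged sketch: set the job jar and ship HBase dependency jars, so the
// "No job jar file set" warning from JobResourceUploader does not apply.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class JobJarSketch {
  public static Job newJob() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-sketch");      // job name is a placeholder
    job.setJarByClass(JobJarSketch.class);                 // lets Hadoop locate the jar with user classes
    TableMapReduceUtil.addDependencyJars(job);             // adds the HBase/third-party jars seen in the log
    return job;
  }
}

setJarByClass points Hadoop at the jar containing the driver class, which is exactly what the warning says is missing.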
2024-12-09T13:32:54,691 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-09T13:32:54,692 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.3 K 2024-12-09T13:32:54,693 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.0 K 2024-12-09T13:32:54,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742139_1315 (size=441) 2024-12-09T13:32:54,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742139_1315 (size=441) 2024-12-09T13:32:54,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742139_1315 (size=441) 2024-12-09T13:32:54,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742140_1316 (size=21) 2024-12-09T13:32:54,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742140_1316 (size=21) 2024-12-09T13:32:54,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742140_1316 (size=21) 2024-12-09T13:32:54,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742141_1317 (size=304153) 2024-12-09T13:32:54,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742141_1317 (size=304153) 2024-12-09T13:32:54,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742141_1317 (size=304153) 2024-12-09T13:32:56,021 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T13:32:58,121 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T13:32:58,121 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-09T13:32:58,125 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0005_000001 (auth:SIMPLE) from 127.0.0.1:43302 2024-12-09T13:32:58,134 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_0/usercache/jenkins/appcache/application_1733750974572_0005/container_1733750974572_0005_01_000001/launch_container.sh] 2024-12-09T13:32:58,134 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_0/usercache/jenkins/appcache/application_1733750974572_0005/container_1733750974572_0005_01_000001/container_tokens] 2024-12-09T13:32:58,134 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_0/usercache/jenkins/appcache/application_1733750974572_0005/container_1733750974572_0005_01_000001/sysfs] 2024-12-09T13:32:59,070 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0006_000001 (auth:SIMPLE) from 127.0.0.1:37688 2024-12-09T13:33:03,854 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0006_000001 (auth:SIMPLE) from 127.0.0.1:58572 2024-12-09T13:33:04,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742142_1318 (size=349851) 2024-12-09T13:33:04,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742142_1318 (size=349851) 2024-12-09T13:33:04,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742142_1318 (size=349851) 2024-12-09T13:33:06,247 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0006_000001 (auth:SIMPLE) from 127.0.0.1:36598 2024-12-09T13:33:06,248 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0006_000001 (auth:SIMPLE) from 127.0.0.1:59602 2024-12-09T13:33:12,261 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_2/usercache/jenkins/appcache/application_1733750974572_0006/container_1733750974572_0006_01_000002/launch_container.sh] 2024-12-09T13:33:12,261 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_2/usercache/jenkins/appcache/application_1733750974572_0006/container_1733750974572_0006_01_000002/container_tokens] 2024-12-09T13:33:12,261 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_2/usercache/jenkins/appcache/application_1733750974572_0006/container_1733750974572_0006_01_000002/sysfs] 2024-12-09T13:33:13,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742143_1319 (size=21177) 2024-12-09T13:33:13,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742143_1319 (size=21177) 2024-12-09T13:33:13,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742143_1319 (size=21177) 2024-12-09T13:33:13,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742144_1320 (size=463) 2024-12-09T13:33:13,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742144_1320 (size=463) 2024-12-09T13:33:13,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742144_1320 (size=463) 2024-12-09T13:33:13,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742145_1321 (size=21177) 2024-12-09T13:33:13,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742145_1321 (size=21177) 2024-12-09T13:33:13,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742145_1321 (size=21177) 2024-12-09T13:33:13,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742146_1322 (size=349851) 2024-12-09T13:33:13,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742146_1322 (size=349851) 2024-12-09T13:33:13,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742146_1322 (size=349851) 2024-12-09T13:33:13,311 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_1/usercache/jenkins/appcache/application_1733750974572_0006/container_1733750974572_0006_01_000003/launch_container.sh] 2024-12-09T13:33:13,311 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_1/usercache/jenkins/appcache/application_1733750974572_0006/container_1733750974572_0006_01_000003/container_tokens] 2024-12-09T13:33:13,311 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_1/usercache/jenkins/appcache/application_1733750974572_0006/container_1733750974572_0006_01_000003/sysfs] 2024-12-09T13:33:13,320 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0006_000001 (auth:SIMPLE) from 127.0.0.1:58498 2024-12-09T13:33:15,220 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-09T13:33:15,220 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-09T13:33:15,222 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-09T13:33:15,222 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-09T13:33:15,222 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-09T13:33:15,223 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-09T13:33:15,224 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-09T13:33:15,224 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-09T13:33:15,224 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@34003054 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751153994/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751153994/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-09T13:33:15,224 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751153994/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-09T13:33:15,224 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751153994/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-09T13:33:15,237 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-12-09T13:33:15,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=125, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-12-09T13:33:15,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-09T13:33:15,240 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751195239"}]},"ts":"1733751195239"} 2024-12-09T13:33:15,241 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-12-09T13:33:15,241 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-12-09T13:33:15,242 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-12-09T13:33:15,243 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=437fd113967029188173f60a096ceda8, UNASSIGN}, {pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=267eb8d1a57e74c58845ab8327206bdd, UNASSIGN}] 2024-12-09T13:33:15,243 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=437fd113967029188173f60a096ceda8, UNASSIGN 2024-12-09T13:33:15,244 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=267eb8d1a57e74c58845ab8327206bdd, UNASSIGN 2024-12-09T13:33:15,244 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=267eb8d1a57e74c58845ab8327206bdd, regionState=CLOSING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:33:15,244 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=437fd113967029188173f60a096ceda8, regionState=CLOSING, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:33:15,246 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=437fd113967029188173f60a096ceda8, UNASSIGN because future has completed 2024-12-09T13:33:15,246 DEBUG 
[PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:33:15,246 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 437fd113967029188173f60a096ceda8, server=c48e4b8eb196,44683,1733750968041}] 2024-12-09T13:33:15,246 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=267eb8d1a57e74c58845ab8327206bdd, UNASSIGN because future has completed 2024-12-09T13:33:15,247 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:33:15,247 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=130, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure 267eb8d1a57e74c58845ab8327206bdd, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:33:15,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-09T13:33:15,399 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(122): Close 267eb8d1a57e74c58845ab8327206bdd 2024-12-09T13:33:15,399 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close 437fd113967029188173f60a096ceda8 2024-12-09T13:33:15,400 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:33:15,400 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:33:15,400 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1722): Closing 267eb8d1a57e74c58845ab8327206bdd, disabling compactions & flushes 2024-12-09T13:33:15,400 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing 437fd113967029188173f60a096ceda8, disabling compactions & flushes 2024-12-09T13:33:15,400 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8. 2024-12-09T13:33:15,400 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd. 2024-12-09T13:33:15,400 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd. 
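For reference against the export entries earlier in this run (ExportSnapshot 1104/1162/663/763/1219/1230/1236, ending in "Export Completed: snaptb0-testConsecutiveExports"), a hedged sketch of driving the same tool outside the test. It assumes ExportSnapshot can be run through Hadoop's ToolRunner and uses the documented -snapshot/-copy-to options; the output path is a placeholder, not the test's local-export directory:

// Hedged sketch: export a snapshot with the ExportSnapshot tool.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testConsecutiveExports",
        "-copy-to", "file:///tmp/local-export"   // placeholder output root
    });
    System.exit(rc);
  }
}

The equivalent command-line form is `hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <output root>`.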
2024-12-09T13:33:15,400 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8. 2024-12-09T13:33:15,400 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8. after waiting 0 ms 2024-12-09T13:33:15,400 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd. after waiting 0 ms 2024-12-09T13:33:15,400 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8. 2024-12-09T13:33:15,400 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd. 2024-12-09T13:33:15,407 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/267eb8d1a57e74c58845ab8327206bdd/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T13:33:15,408 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:33:15,408 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd. 
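The TestExportSnapshot(495/500) entries above verify the export by listing .snapshotinfo and data.manifest under both the HDFS and local snapshot roots. A hedged sketch of the same kind of check with the Hadoop FileSystem API; the local path below is a placeholder, not the test's export directory:

// Hedged sketch: list the contents of an exported snapshot directory.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListExportedSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path root = new Path("file:///tmp/local-export/.hbase-snapshot/snaptb0-testConsecutiveExports");
    FileSystem fs = FileSystem.get(root.toUri(), conf);
    // Expect at least .snapshotinfo and data.manifest, as in the log.
    for (FileStatus status : fs.listStatus(root)) {
      System.out.println(status.getPath());
    }
  }
}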
2024-12-09T13:33:15,408 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1676): Region close journal for 267eb8d1a57e74c58845ab8327206bdd: Waiting for close lock at 1733751195400Running coprocessor pre-close hooks at 1733751195400Disabling compacts and flushes for region at 1733751195400Disabling writes for close at 1733751195400Writing region close event to WAL at 1733751195401 (+1 ms)Running coprocessor post-close hooks at 1733751195408 (+7 ms)Closed at 1733751195408 2024-12-09T13:33:15,411 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/437fd113967029188173f60a096ceda8/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T13:33:15,411 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(157): Closed 267eb8d1a57e74c58845ab8327206bdd 2024-12-09T13:33:15,411 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:33:15,412 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8. 2024-12-09T13:33:15,412 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=267eb8d1a57e74c58845ab8327206bdd, regionState=CLOSED 2024-12-09T13:33:15,412 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for 437fd113967029188173f60a096ceda8: Waiting for close lock at 1733751195400Running coprocessor pre-close hooks at 1733751195400Disabling compacts and flushes for region at 1733751195400Disabling writes for close at 1733751195400Writing region close event to WAL at 1733751195402 (+2 ms)Running coprocessor post-close hooks at 1733751195411 (+9 ms)Closed at 1733751195411 2024-12-09T13:33:15,413 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed 437fd113967029188173f60a096ceda8 2024-12-09T13:33:15,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=130, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure 267eb8d1a57e74c58845ab8327206bdd, server=c48e4b8eb196,39211,1733750968235 because future has completed 2024-12-09T13:33:15,413 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=437fd113967029188173f60a096ceda8, regionState=CLOSED 2024-12-09T13:33:15,415 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 437fd113967029188173f60a096ceda8, server=c48e4b8eb196,44683,1733750968041 because future has completed 2024-12-09T13:33:15,416 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=130, resume processing ppid=128 2024-12-09T13:33:15,417 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, ppid=128, state=SUCCESS, hasLock=false; CloseRegionProcedure 
267eb8d1a57e74c58845ab8327206bdd, server=c48e4b8eb196,39211,1733750968235 in 167 msec 2024-12-09T13:33:15,417 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=267eb8d1a57e74c58845ab8327206bdd, UNASSIGN in 173 msec 2024-12-09T13:33:15,417 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=127 2024-12-09T13:33:15,417 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=127, state=SUCCESS, hasLock=false; CloseRegionProcedure 437fd113967029188173f60a096ceda8, server=c48e4b8eb196,44683,1733750968041 in 170 msec 2024-12-09T13:33:15,419 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=127, resume processing ppid=126 2024-12-09T13:33:15,419 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=437fd113967029188173f60a096ceda8, UNASSIGN in 174 msec 2024-12-09T13:33:15,421 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=126, resume processing ppid=125 2024-12-09T13:33:15,421 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 177 msec 2024-12-09T13:33:15,422 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751195422"}]},"ts":"1733751195422"} 2024-12-09T13:33:15,424 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-12-09T13:33:15,424 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-12-09T13:33:15,425 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 187 msec 2024-12-09T13:33:15,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-09T13:33:15,553 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-09T13:33:15,554 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-12-09T13:33:15,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-09T13:33:15,556 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-09T13:33:15,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-12-09T13:33:15,557 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting 
regions from filesystem for pid=131, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-09T13:33:15,560 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-12-09T13:33:15,561 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/437fd113967029188173f60a096ceda8 2024-12-09T13:33:15,561 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/267eb8d1a57e74c58845ab8327206bdd 2024-12-09T13:33:15,563 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/437fd113967029188173f60a096ceda8/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/437fd113967029188173f60a096ceda8/recovered.edits] 2024-12-09T13:33:15,563 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/267eb8d1a57e74c58845ab8327206bdd/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/267eb8d1a57e74c58845ab8327206bdd/recovered.edits] 2024-12-09T13:33:15,567 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/267eb8d1a57e74c58845ab8327206bdd/cf/12661ec6d2ae4e47ba7f6eff6bd8fbeb to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testConsecutiveExports/267eb8d1a57e74c58845ab8327206bdd/cf/12661ec6d2ae4e47ba7f6eff6bd8fbeb 2024-12-09T13:33:15,567 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/437fd113967029188173f60a096ceda8/cf/53eee90d0d0a4040bb582c25d9f7f1eb to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testConsecutiveExports/437fd113967029188173f60a096ceda8/cf/53eee90d0d0a4040bb582c25d9f7f1eb 2024-12-09T13:33:15,569 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/437fd113967029188173f60a096ceda8/recovered.edits/9.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testConsecutiveExports/437fd113967029188173f60a096ceda8/recovered.edits/9.seqid 2024-12-09T13:33:15,569 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/267eb8d1a57e74c58845ab8327206bdd/recovered.edits/9.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testConsecutiveExports/267eb8d1a57e74c58845ab8327206bdd/recovered.edits/9.seqid 2024-12-09T13:33:15,570 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/437fd113967029188173f60a096ceda8 2024-12-09T13:33:15,570 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testConsecutiveExports/267eb8d1a57e74c58845ab8327206bdd 2024-12-09T13:33:15,570 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-12-09T13:33:15,572 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=131, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-09T13:33:15,574 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-12-09T13:33:15,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-09T13:33:15,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-09T13:33:15,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-09T13:33:15,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-09T13:33:15,611 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-09T13:33:15,611 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-09T13:33:15,611 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-09T13:33:15,611 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-09T13:33:15,612 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor. 
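The master-side procedures logged from 13:33:15,237 onward (DisableTableProcedure pid=125, DeleteTableProcedure pid=131, then the two snapshot deletions) correspond to a straightforward client-side cleanup. A hedged Admin sketch, reusing the table and snapshot names from the log; the connection configuration is assumed to come from the usual hbase-site.xml on the classpath:

// Hedged sketch: client-side cleanup matching the procedures in the log.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CleanupSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testConsecutiveExports");
      admin.disableTable(tn);   // DisableTableProcedure (pid=125 in the log)
      admin.deleteTable(tn);    // DeleteTableProcedure (pid=131 in the log)
      admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
      admin.deleteSnapshot("snaptb0-testConsecutiveExports");
    }
  }
}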
2024-12-09T13:33:15,613 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=131, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-09T13:33:15,613 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states. 2024-12-09T13:33:15,614 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751195613"}]},"ts":"9223372036854775807"} 2024-12-09T13:33:15,614 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751195613"}]},"ts":"9223372036854775807"} 2024-12-09T13:33:15,615 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-09T13:33:15,616 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 437fd113967029188173f60a096ceda8, NAME => 'testtb-testConsecutiveExports,,1733751152654.437fd113967029188173f60a096ceda8.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 267eb8d1a57e74c58845ab8327206bdd, NAME => 'testtb-testConsecutiveExports,1,1733751152654.267eb8d1a57e74c58845ab8327206bdd.', STARTKEY => '1', ENDKEY => ''}] 2024-12-09T13:33:15,616 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted. 2024-12-09T13:33:15,616 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733751195616"}]},"ts":"9223372036854775807"} 2024-12-09T13:33:15,618 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-12-09T13:33:15,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-09T13:33:15,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-09T13:33:15,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-09T13:33:15,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:15,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:15,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, 
quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:15,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-09T13:33:15,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:15,619 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=131, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-09T13:33:15,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-12-09T13:33:15,620 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 65 msec 2024-12-09T13:33:15,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-12-09T13:33:15,725 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-12-09T13:33:15,726 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-09T13:33:15,739 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-12-09T13:33:15,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-12-09T13:33:15,743 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-12-09T13:33:15,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-12-09T13:33:15,769 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=808 (was 805) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #0 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:56046 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 141786) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-203194278_1 at /127.0.0.1:56012 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5036 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-203194278_1 at /127.0.0.1:58758 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44139 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (294847031) connection to localhost/127.0.0.1:44139 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MoveIntermediateToDone Thread #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:58768 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=801 (was 811), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=771 (was 672) - SystemLoadAverage LEAK? -, ProcessCount=20 (was 19) - ProcessCount LEAK? -, AvailableMemoryMB=1449 (was 1804) 2024-12-09T13:33:15,769 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=808 is superior to 500 2024-12-09T13:33:15,785 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=808, OpenFileDescriptor=801, MaxFileDescriptor=1048576, SystemLoadAverage=771, ProcessCount=20, AvailableMemoryMB=1448 2024-12-09T13:33:15,785 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=808 is superior to 500 2024-12-09T13:33:15,786 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T13:33:15,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:15,788 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T13:33:15,788 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:33:15,789 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 132 2024-12-09T13:33:15,789 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T13:33:15,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-09T13:33:15,796 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742147_1323 (size=422) 2024-12-09T13:33:15,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742147_1323 (size=422) 2024-12-09T13:33:15,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742147_1323 (size=422) 2024-12-09T13:33:15,798 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1bcbe3770b02c0b22c3abe057c6ed82d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:33:15,799 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => ebf7c4bb66d6a538b64ca52a27de834f, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:33:15,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742148_1324 (size=83) 2024-12-09T13:33:15,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742149_1325 (size=83) 2024-12-09T13:33:15,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742148_1324 (size=83) 2024-12-09T13:33:15,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742148_1324 (size=83) 2024-12-09T13:33:15,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742149_1325 (size=83) 2024-12-09T13:33:15,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742149_1325 (size=83) 2024-12-09T13:33:15,807 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated 
testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:33:15,807 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing 1bcbe3770b02c0b22c3abe057c6ed82d, disabling compactions & flushes 2024-12-09T13:33:15,807 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d. 2024-12-09T13:33:15,807 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d. 2024-12-09T13:33:15,807 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d. after waiting 0 ms 2024-12-09T13:33:15,807 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d. 2024-12-09T13:33:15,807 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d. 2024-12-09T13:33:15,807 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:33:15,807 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1bcbe3770b02c0b22c3abe057c6ed82d: Waiting for close lock at 1733751195807Disabling compacts and flushes for region at 1733751195807Disabling writes for close at 1733751195807Writing region close event to WAL at 1733751195807Closed at 1733751195807 2024-12-09T13:33:15,807 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing ebf7c4bb66d6a538b64ca52a27de834f, disabling compactions & flushes 2024-12-09T13:33:15,807 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f. 2024-12-09T13:33:15,807 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f. 2024-12-09T13:33:15,807 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f. 
after waiting 0 ms 2024-12-09T13:33:15,807 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f. 2024-12-09T13:33:15,808 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f. 2024-12-09T13:33:15,808 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for ebf7c4bb66d6a538b64ca52a27de834f: Waiting for close lock at 1733751195807Disabling compacts and flushes for region at 1733751195807Disabling writes for close at 1733751195807Writing region close event to WAL at 1733751195807Closed at 1733751195807 2024-12-09T13:33:15,809 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T13:33:15,809 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733751195809"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733751195809"}]},"ts":"1733751195809"} 2024-12-09T13:33:15,809 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733751195809"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733751195809"}]},"ts":"1733751195809"} 2024-12-09T13:33:15,812 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
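[Editor's note] The create request logged above ('testtb-testExportFileSystemStateWithMergeRegion' with a single 'cf' family, one version, REGION_REPLICATION '1', 64 KB block size) and the two regions split at row key '1' correspond to an ordinary client-side table creation. The sketch below shows one way to issue an equivalent request with the public HBase Java client; it is an illustration, not the exact call the test harness makes, and the connection configuration is assumed to come from an hbase-site.xml on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMergeRegionTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath (assumed)
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
          .setRegionReplication(1)                                      // REGION_REPLICATION => '1'
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                                        // VERSIONS => '1'
              .setBlocksize(64 * 1024)                                  // BLOCKSIZE => '65536 B (64KB)'
              .build());
      // Pre-split at row key '1' so the table starts with two regions,
      // matching the STARTKEY/ENDKEY pairs in the log above.
      admin.createTable(td.build(), new byte[][] { Bytes.toBytes("1") });
    }
  }
}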
2024-12-09T13:33:15,813 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T13:33:15,813 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751195813"}]},"ts":"1733751195813"} 2024-12-09T13:33:15,815 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-12-09T13:33:15,815 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {c48e4b8eb196=0} racks are {/default-rack=0} 2024-12-09T13:33:15,816 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T13:33:15,816 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T13:33:15,816 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T13:33:15,816 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T13:33:15,816 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T13:33:15,816 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T13:33:15,816 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T13:33:15,816 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T13:33:15,816 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T13:33:15,816 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T13:33:15,817 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1bcbe3770b02c0b22c3abe057c6ed82d, ASSIGN}, {pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ebf7c4bb66d6a538b64ca52a27de834f, ASSIGN}] 2024-12-09T13:33:15,818 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ebf7c4bb66d6a538b64ca52a27de834f, ASSIGN 2024-12-09T13:33:15,818 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1bcbe3770b02c0b22c3abe057c6ed82d, ASSIGN 2024-12-09T13:33:15,818 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ebf7c4bb66d6a538b64ca52a27de834f, ASSIGN; state=OFFLINE, location=c48e4b8eb196,44683,1733750968041; forceNewPlan=false, retain=false 
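[Editor's note] The BalancerClusterState and TransitRegionStateProcedure entries above show the master assigning the two new regions to specific region servers. A small client-side sketch for checking where a table's regions ended up (assuming an already-open Connection named conn; the table name is taken from this log) could look like:

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class PrintRegionLocations {
  // Prints the encoded region name and hosting server for every region of the table.
  static void printLocations(Connection conn) throws IOException {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    try (RegionLocator locator = conn.getRegionLocator(table)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}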
2024-12-09T13:33:15,818 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1bcbe3770b02c0b22c3abe057c6ed82d, ASSIGN; state=OFFLINE, location=c48e4b8eb196,39211,1733750968235; forceNewPlan=false, retain=false 2024-12-09T13:33:15,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-09T13:33:15,969 INFO [c48e4b8eb196:45049 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-09T13:33:15,969 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=ebf7c4bb66d6a538b64ca52a27de834f, regionState=OPENING, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:33:15,969 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=1bcbe3770b02c0b22c3abe057c6ed82d, regionState=OPENING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:33:15,971 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ebf7c4bb66d6a538b64ca52a27de834f, ASSIGN because future has completed 2024-12-09T13:33:15,971 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure ebf7c4bb66d6a538b64ca52a27de834f, server=c48e4b8eb196,44683,1733750968041}] 2024-12-09T13:33:15,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1bcbe3770b02c0b22c3abe057c6ed82d, ASSIGN because future has completed 2024-12-09T13:33:15,972 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=136, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1bcbe3770b02c0b22c3abe057c6ed82d, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:33:16,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-09T13:33:16,131 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f. 2024-12-09T13:33:16,131 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d. 
2024-12-09T13:33:16,132 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => ebf7c4bb66d6a538b64ca52a27de834f, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T13:33:16,132 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7752): Opening region: {ENCODED => 1bcbe3770b02c0b22c3abe057c6ed82d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T13:33:16,132 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d. service=AccessControlService 2024-12-09T13:33:16,132 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f. service=AccessControlService 2024-12-09T13:33:16,132 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T13:33:16,132 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
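[Editor's note] The AccessController coprocessor registered above keeps its permission cache in sync through ZooKeeper watches on /hbase/acl, which is what the ZKWatcher "NodeDeleted"/"NodeChildrenChanged ... path=/hbase/acl" entries elsewhere in this log reflect. A generic sketch of observing such events with the plain ZooKeeper client is shown below; it uses the quorum address and znode path copied from the log, but it is not HBase's own ZKWatcher implementation, and the session timeout is an arbitrary choice for illustration.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class AclWatchExample {
  public static void main(String[] args) throws Exception {
    // Connect to the quorum seen in the log and watch the ACL parent znode.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:57683", 30000, new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // Fires for NodeChildrenChanged / NodeDeleted etc., mirroring the ZKWatcher lines in the log.
        System.out.println("type=" + event.getType() + ", path=" + event.getPath());
      }
    });
    zk.getChildren("/hbase/acl", true);   // 'true' registers the default watcher on this znode
    Thread.sleep(60_000);                 // keep the session alive long enough to observe events
    zk.close();
  }
}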
2024-12-09T13:33:16,133 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 1bcbe3770b02c0b22c3abe057c6ed82d 2024-12-09T13:33:16,133 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion ebf7c4bb66d6a538b64ca52a27de834f 2024-12-09T13:33:16,133 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:33:16,133 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:33:16,133 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for ebf7c4bb66d6a538b64ca52a27de834f 2024-12-09T13:33:16,133 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7794): checking encryption for 1bcbe3770b02c0b22c3abe057c6ed82d 2024-12-09T13:33:16,133 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7797): checking classloading for ebf7c4bb66d6a538b64ca52a27de834f 2024-12-09T13:33:16,133 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7797): checking classloading for 1bcbe3770b02c0b22c3abe057c6ed82d 2024-12-09T13:33:16,135 INFO [StoreOpener-1bcbe3770b02c0b22c3abe057c6ed82d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1bcbe3770b02c0b22c3abe057c6ed82d 2024-12-09T13:33:16,135 INFO [StoreOpener-ebf7c4bb66d6a538b64ca52a27de834f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ebf7c4bb66d6a538b64ca52a27de834f 2024-12-09T13:33:16,136 INFO [StoreOpener-1bcbe3770b02c0b22c3abe057c6ed82d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1bcbe3770b02c0b22c3abe057c6ed82d columnFamilyName cf 2024-12-09T13:33:16,136 INFO [StoreOpener-ebf7c4bb66d6a538b64ca52a27de834f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ebf7c4bb66d6a538b64ca52a27de834f columnFamilyName cf 2024-12-09T13:33:16,136 DEBUG [StoreOpener-1bcbe3770b02c0b22c3abe057c6ed82d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:33:16,136 DEBUG [StoreOpener-ebf7c4bb66d6a538b64ca52a27de834f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:33:16,137 INFO [StoreOpener-ebf7c4bb66d6a538b64ca52a27de834f-1 {}] regionserver.HStore(327): Store=ebf7c4bb66d6a538b64ca52a27de834f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:33:16,137 INFO [StoreOpener-1bcbe3770b02c0b22c3abe057c6ed82d-1 {}] regionserver.HStore(327): Store=1bcbe3770b02c0b22c3abe057c6ed82d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:33:16,137 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for ebf7c4bb66d6a538b64ca52a27de834f 2024-12-09T13:33:16,137 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1038): replaying wal for 1bcbe3770b02c0b22c3abe057c6ed82d 2024-12-09T13:33:16,137 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/1bcbe3770b02c0b22c3abe057c6ed82d 2024-12-09T13:33:16,137 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebf7c4bb66d6a538b64ca52a27de834f 2024-12-09T13:33:16,138 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/1bcbe3770b02c0b22c3abe057c6ed82d 2024-12-09T13:33:16,138 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 
{event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebf7c4bb66d6a538b64ca52a27de834f 2024-12-09T13:33:16,138 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1048): stopping wal replay for 1bcbe3770b02c0b22c3abe057c6ed82d 2024-12-09T13:33:16,138 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for ebf7c4bb66d6a538b64ca52a27de834f 2024-12-09T13:33:16,138 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1060): Cleaning up temporary data for 1bcbe3770b02c0b22c3abe057c6ed82d 2024-12-09T13:33:16,138 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for ebf7c4bb66d6a538b64ca52a27de834f 2024-12-09T13:33:16,140 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for ebf7c4bb66d6a538b64ca52a27de834f 2024-12-09T13:33:16,140 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1093): writing seq id for 1bcbe3770b02c0b22c3abe057c6ed82d 2024-12-09T13:33:16,142 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/1bcbe3770b02c0b22c3abe057c6ed82d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:33:16,142 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebf7c4bb66d6a538b64ca52a27de834f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:33:16,142 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1114): Opened 1bcbe3770b02c0b22c3abe057c6ed82d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74004580, jitterRate=0.10275417566299438}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:33:16,142 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1114): Opened ebf7c4bb66d6a538b64ca52a27de834f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67572523, jitterRate=0.0069090574979782104}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:33:16,142 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1bcbe3770b02c0b22c3abe057c6ed82d 2024-12-09T13:33:16,142 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ebf7c4bb66d6a538b64ca52a27de834f 
2024-12-09T13:33:16,143 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1006): Region open journal for 1bcbe3770b02c0b22c3abe057c6ed82d: Running coprocessor pre-open hook at 1733751196133Writing region info on filesystem at 1733751196133Initializing all the Stores at 1733751196134 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751196134Cleaning up temporary data from old regions at 1733751196138 (+4 ms)Running coprocessor post-open hooks at 1733751196142 (+4 ms)Region opened successfully at 1733751196143 (+1 ms) 2024-12-09T13:33:16,143 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal for ebf7c4bb66d6a538b64ca52a27de834f: Running coprocessor pre-open hook at 1733751196133Writing region info on filesystem at 1733751196133Initializing all the Stores at 1733751196134 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751196134Cleaning up temporary data from old regions at 1733751196138 (+4 ms)Running coprocessor post-open hooks at 1733751196142 (+4 ms)Region opened successfully at 1733751196143 (+1 ms) 2024-12-09T13:33:16,143 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d., pid=136, masterSystemTime=1733751196124 2024-12-09T13:33:16,143 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f., pid=135, masterSystemTime=1733751196123 2024-12-09T13:33:16,145 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d. 2024-12-09T13:33:16,145 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d. 2024-12-09T13:33:16,146 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=1bcbe3770b02c0b22c3abe057c6ed82d, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:33:16,146 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f. 
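[Editor's note] Once the open journal above reports "Region opened successfully", the table is usable from a client. A minimal put/get sketch against the 'cf' family follows; the row key, qualifier and value are made up for illustration, and an open Connection named conn is assumed.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class PutGetExample {
  static void putAndGet(Connection conn) throws java.io.IOException {
    TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    try (Table table = conn.getTable(name)) {
      // Row "0" sorts before the split key '1', so it lands in the first region opened above.
      table.put(new Put(Bytes.toBytes("0"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
      Result r = table.get(new Get(Bytes.toBytes("0")));
      System.out.println(Bytes.toString(r.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"))));
    }
  }
}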
2024-12-09T13:33:16,146 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f. 2024-12-09T13:33:16,146 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=ebf7c4bb66d6a538b64ca52a27de834f, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:33:16,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=136, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1bcbe3770b02c0b22c3abe057c6ed82d, server=c48e4b8eb196,39211,1733750968235 because future has completed 2024-12-09T13:33:16,149 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure ebf7c4bb66d6a538b64ca52a27de834f, server=c48e4b8eb196,44683,1733750968041 because future has completed 2024-12-09T13:33:16,150 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=136, resume processing ppid=133 2024-12-09T13:33:16,150 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure 1bcbe3770b02c0b22c3abe057c6ed82d, server=c48e4b8eb196,39211,1733750968235 in 177 msec 2024-12-09T13:33:16,151 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=134 2024-12-09T13:33:16,151 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=134, state=SUCCESS, hasLock=false; OpenRegionProcedure ebf7c4bb66d6a538b64ca52a27de834f, server=c48e4b8eb196,44683,1733750968041 in 179 msec 2024-12-09T13:33:16,152 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1bcbe3770b02c0b22c3abe057c6ed82d, ASSIGN in 334 msec 2024-12-09T13:33:16,153 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=134, resume processing ppid=132 2024-12-09T13:33:16,153 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ebf7c4bb66d6a538b64ca52a27de834f, ASSIGN in 335 msec 2024-12-09T13:33:16,153 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T13:33:16,153 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751196153"}]},"ts":"1733751196153"} 2024-12-09T13:33:16,155 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-12-09T13:33:16,155 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute 
state=CREATE_TABLE_POST_OPERATION 2024-12-09T13:33:16,155 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-12-09T13:33:16,158 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-09T13:33:16,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:16,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:16,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:16,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:16,227 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:16,227 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:16,227 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:16,227 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:16,231 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 441 msec 2024-12-09T13:33:16,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-09T13:33:16,413 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-09T13:33:16,413 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithMergeRegion get assigned. 
Timeout = 60000ms 2024-12-09T13:33:16,414 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:33:16,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned to meta. Checking AM states. 2024-12-09T13:33:16,419 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:33:16,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned. 2024-12-09T13:33:16,419 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-09T13:33:16,423 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-09T13:33:16,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733751196423 (current time:1733751196423). 2024-12-09T13:33:16,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T13:33:16,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-09T13:33:16,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T13:33:16,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f22e1a7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:16,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:33:16,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:33:16,426 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:33:16,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:33:16,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:33:16,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24fdf7ab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:16,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:33:16,427 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:33:16,427 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:16,428 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38264, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:33:16,428 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48607990, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:16,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:33:16,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:33:16,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:33:16,431 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41986, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:33:16,432 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049. 
2024-12-09T13:33:16,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:33:16,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:16,432 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:33:16,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:16,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16e531c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:16,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:33:16,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:33:16,434 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:33:16,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:33:16,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:33:16,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77e7fb01, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:16,435 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:33:16,435 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:33:16,435 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:16,435 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38280, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:33:16,436 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26dd9da4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:16,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:33:16,437 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:33:16,437 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:33:16,438 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41990, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:33:16,440 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:33:16,441 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049. 
2024-12-09T13:33:16,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:33:16,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:16,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:16,441 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:33:16,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-09T13:33:16,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-09T13:33:16,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-09T13:33:16,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-12-09T13:33:16,444 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T13:33:16,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-09T13:33:16,445 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T13:33:16,448 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T13:33:16,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742150_1326 (size=215) 2024-12-09T13:33:16,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742150_1326 (size=215) 2024-12-09T13:33:16,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742150_1326 (size=215) 2024-12-09T13:33:16,456 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T13:33:16,457 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1bcbe3770b02c0b22c3abe057c6ed82d}, {pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ebf7c4bb66d6a538b64ca52a27de834f}] 2024-12-09T13:33:16,458 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ebf7c4bb66d6a538b64ca52a27de834f 2024-12-09T13:33:16,458 INFO [PEWorker-4 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1bcbe3770b02c0b22c3abe057c6ed82d 2024-12-09T13:33:16,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-09T13:33:16,611 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44683 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=139 2024-12-09T13:33:16,611 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39211 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-12-09T13:33:16,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f. 2024-12-09T13:33:16,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d. 2024-12-09T13:33:16,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for 1bcbe3770b02c0b22c3abe057c6ed82d: 2024-12-09T13:33:16,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.HRegion(2603): Flush status journal for ebf7c4bb66d6a538b64ca52a27de834f: 2024-12-09T13:33:16,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-09T13:33:16,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-09T13:33:16,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:16,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:16,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:33:16,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:33:16,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T13:33:16,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T13:33:16,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742151_1327 (size=86) 2024-12-09T13:33:16,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742151_1327 (size=86) 2024-12-09T13:33:16,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742151_1327 (size=86) 2024-12-09T13:33:16,624 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d. 2024-12-09T13:33:16,624 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-12-09T13:33:16,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-12-09T13:33:16,625 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 1bcbe3770b02c0b22c3abe057c6ed82d 2024-12-09T13:33:16,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742152_1328 (size=86) 2024-12-09T13:33:16,626 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1bcbe3770b02c0b22c3abe057c6ed82d 2024-12-09T13:33:16,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742152_1328 (size=86) 2024-12-09T13:33:16,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742152_1328 (size=86) 2024-12-09T13:33:16,627 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f. 
2024-12-09T13:33:16,627 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-09T13:33:16,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=139 2024-12-09T13:33:16,627 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region ebf7c4bb66d6a538b64ca52a27de834f 2024-12-09T13:33:16,628 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ebf7c4bb66d6a538b64ca52a27de834f 2024-12-09T13:33:16,628 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1bcbe3770b02c0b22c3abe057c6ed82d in 171 msec 2024-12-09T13:33:16,630 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=139, resume processing ppid=137 2024-12-09T13:33:16,630 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ebf7c4bb66d6a538b64ca52a27de834f in 172 msec 2024-12-09T13:33:16,630 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T13:33:16,631 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T13:33:16,631 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T13:33:16,631 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:16,632 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:16,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742153_1329 (size=597) 2024-12-09T13:33:16,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742153_1329 (size=597) 2024-12-09T13:33:16,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742153_1329 (size=597) 
2024-12-09T13:33:16,644 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T13:33:16,648 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T13:33:16,649 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:16,650 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T13:33:16,650 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-12-09T13:33:16,651 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 208 msec 2024-12-09T13:33:16,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-09T13:33:16,764 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-09T13:33:16,772 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='0489051a8af1e5167d946c08b709886dd', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:33:16,773 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='1f758833b77944d9e515d072ad95cf886', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f., hostname=c48e4b8eb196,44683,1733750968041, seqNum=2] 2024-12-09T13:33:16,774 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched 
location of 'testtb-testExportFileSystemStateWithMergeRegion', row='2f9516b51c106b9e2ccbbc7b39a09c9e6', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f., hostname=c48e4b8eb196,44683,1733750968041, seqNum=2] 2024-12-09T13:33:16,776 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='3d52634a9f1b266587599d704279bb35d', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f., hostname=c48e4b8eb196,44683,1733750968041, seqNum=2] 2024-12-09T13:33:16,776 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='4dbd50fc1b5ae0cb88a5063f9a7e031c3', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f., hostname=c48e4b8eb196,44683,1733750968041, seqNum=2] 2024-12-09T13:33:16,782 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39211 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T13:33:16,784 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T13:33:16,785 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-09T13:33:16,788 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:16,788 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d. 
2024-12-09T13:33:16,788 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:33:16,789 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-09T13:33:16,794 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-09T13:33:16,799 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-09T13:33:16,802 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-09T13:33:16,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733751196802 (current time:1733751196802). 2024-12-09T13:33:16,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T13:33:16,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-09T13:33:16,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T13:33:16,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d8748d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:16,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:33:16,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:33:16,803 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:33:16,803 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:33:16,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:33:16,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@794140a6, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:16,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:33:16,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:33:16,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:16,805 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38298, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:33:16,805 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cbc0c7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:16,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:33:16,806 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:33:16,806 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:33:16,807 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41996, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:33:16,808 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049. 
2024-12-09T13:33:16,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:33:16,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:16,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:16,808 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:33:16,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f19d017, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:16,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:33:16,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:33:16,810 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:33:16,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:33:16,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:33:16,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@724e79b3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:16,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:33:16,811 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:33:16,811 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:16,811 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38316, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:33:16,812 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f09ff54, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:16,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:33:16,813 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:33:16,813 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:33:16,814 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42000, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:33:16,815 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:33:16,816 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049. 
2024-12-09T13:33:16,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:33:16,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:16,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:16,817 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:33:16,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-09T13:33:16,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-09T13:33:16,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-09T13:33:16,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-09T13:33:16,819 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T13:33:16,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-09T13:33:16,820 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T13:33:16,822 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T13:33:16,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742154_1330 (size=210) 2024-12-09T13:33:16,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742154_1330 (size=210) 2024-12-09T13:33:16,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742154_1330 (size=210) 2024-12-09T13:33:16,828 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T13:33:16,828 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1bcbe3770b02c0b22c3abe057c6ed82d}, {pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ebf7c4bb66d6a538b64ca52a27de834f}] 2024-12-09T13:33:16,828 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1bcbe3770b02c0b22c3abe057c6ed82d 2024-12-09T13:33:16,829 INFO [PEWorker-4 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ebf7c4bb66d6a538b64ca52a27de834f 2024-12-09T13:33:16,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-09T13:33:16,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44683 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-12-09T13:33:16,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39211 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-12-09T13:33:16,980 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f. 2024-12-09T13:33:16,980 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d. 2024-12-09T13:33:16,980 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2902): Flushing ebf7c4bb66d6a538b64ca52a27de834f 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-09T13:33:16,980 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing 1bcbe3770b02c0b22c3abe057c6ed82d 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-09T13:33:16,994 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebf7c4bb66d6a538b64ca52a27de834f/.tmp/cf/68ece6dc240345cbb186c6b74c806267 is 71, key is 1902a830bf1045b5c8788155e6363218/cf:q/1733751196784/Put/seqid=0 2024-12-09T13:33:17,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742155_1331 (size=8256) 2024-12-09T13:33:17,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742155_1331 (size=8256) 2024-12-09T13:33:17,000 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/1bcbe3770b02c0b22c3abe057c6ed82d/.tmp/cf/a3b63c41c8194a7190ae67bd3c72b394 is 71, key is 06fcfbafe8f5db5824ee0841e034c7f0/cf:q/1733751196782/Put/seqid=0 2024-12-09T13:33:17,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742155_1331 (size=8256) 2024-12-09T13:33:17,001 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.DefaultStoreFlusher(81): Flushed memstore 
data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebf7c4bb66d6a538b64ca52a27de834f/.tmp/cf/68ece6dc240345cbb186c6b74c806267 2024-12-09T13:33:17,005 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebf7c4bb66d6a538b64ca52a27de834f/.tmp/cf/68ece6dc240345cbb186c6b74c806267 as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebf7c4bb66d6a538b64ca52a27de834f/cf/68ece6dc240345cbb186c6b74c806267 2024-12-09T13:33:17,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742156_1332 (size=5356) 2024-12-09T13:33:17,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742156_1332 (size=5356) 2024-12-09T13:33:17,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742156_1332 (size=5356) 2024-12-09T13:33:17,010 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/1bcbe3770b02c0b22c3abe057c6ed82d/.tmp/cf/a3b63c41c8194a7190ae67bd3c72b394 2024-12-09T13:33:17,011 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebf7c4bb66d6a538b64ca52a27de834f/cf/68ece6dc240345cbb186c6b74c806267, entries=46, sequenceid=6, filesize=8.1 K 2024-12-09T13:33:17,012 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for ebf7c4bb66d6a538b64ca52a27de834f in 31ms, sequenceid=6, compaction requested=false 2024-12-09T13:33:17,012 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-09T13:33:17,012 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2603): Flush status journal for ebf7c4bb66d6a538b64ca52a27de834f: 2024-12-09T13:33:17,012 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 
2024-12-09T13:33:17,012 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:17,012 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:33:17,012 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebf7c4bb66d6a538b64ca52a27de834f/cf/68ece6dc240345cbb186c6b74c806267] hfiles 2024-12-09T13:33:17,012 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebf7c4bb66d6a538b64ca52a27de834f/cf/68ece6dc240345cbb186c6b74c806267 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:17,017 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/1bcbe3770b02c0b22c3abe057c6ed82d/.tmp/cf/a3b63c41c8194a7190ae67bd3c72b394 as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/1bcbe3770b02c0b22c3abe057c6ed82d/cf/a3b63c41c8194a7190ae67bd3c72b394 2024-12-09T13:33:17,023 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/1bcbe3770b02c0b22c3abe057c6ed82d/cf/a3b63c41c8194a7190ae67bd3c72b394, entries=4, sequenceid=6, filesize=5.2 K 2024-12-09T13:33:17,024 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 1bcbe3770b02c0b22c3abe057c6ed82d in 44ms, sequenceid=6, compaction requested=false 2024-12-09T13:33:17,025 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2603): Flush status journal for 1bcbe3770b02c0b22c3abe057c6ed82d: 2024-12-09T13:33:17,025 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 
2024-12-09T13:33:17,025 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:17,025 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:33:17,025 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/1bcbe3770b02c0b22c3abe057c6ed82d/cf/a3b63c41c8194a7190ae67bd3c72b394] hfiles 2024-12-09T13:33:17,025 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/1bcbe3770b02c0b22c3abe057c6ed82d/cf/a3b63c41c8194a7190ae67bd3c72b394 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:17,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742157_1333 (size=125) 2024-12-09T13:33:17,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742157_1333 (size=125) 2024-12-09T13:33:17,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742157_1333 (size=125) 2024-12-09T13:33:17,029 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f. 
2024-12-09T13:33:17,029 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-12-09T13:33:17,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=142 2024-12-09T13:33:17,029 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region ebf7c4bb66d6a538b64ca52a27de834f 2024-12-09T13:33:17,029 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ebf7c4bb66d6a538b64ca52a27de834f 2024-12-09T13:33:17,031 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ebf7c4bb66d6a538b64ca52a27de834f in 202 msec 2024-12-09T13:33:17,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742158_1334 (size=125) 2024-12-09T13:33:17,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742158_1334 (size=125) 2024-12-09T13:33:17,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742158_1334 (size=125) 2024-12-09T13:33:17,042 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d. 
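With pid=142 acknowledged by the master, both SnapshotRegionProcedure children of pid=140 are wrapping up, and the parent SnapshotProcedure walks through its remaining states below (split regions, MOB region, consolidate, verify, complete). From the client side this whole exchange is typically a single blocking snapshot call, which is what the recurring "Checking to see if procedure is done pid=140" polls correspond to. A minimal sketch of such a call, using the table and snapshot names seen in this log; this is illustrative, not the test's source, and the class name and connection setup are assumed.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure finishes; for an
      // enabled table this takes the FLUSH snapshot path seen in the log.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
      // Optionally confirm the snapshot is registered.
      admin.listSnapshots().forEach(s ->
          System.out.println(s.getName() + " on " + s.getTableName()));
    }
  }
}
```

Admin.snapshot returns only after the master moves the snapshot out of .hbase-snapshot/.tmp, which matches the SNAPSHOT_COMPLETE_SNAPSHOT step further down.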
2024-12-09T13:33:17,042 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-12-09T13:33:17,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-12-09T13:33:17,043 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 1bcbe3770b02c0b22c3abe057c6ed82d 2024-12-09T13:33:17,043 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1bcbe3770b02c0b22c3abe057c6ed82d 2024-12-09T13:33:17,046 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=141, resume processing ppid=140 2024-12-09T13:33:17,046 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T13:33:17,046 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1bcbe3770b02c0b22c3abe057c6ed82d in 216 msec 2024-12-09T13:33:17,047 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T13:33:17,047 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T13:33:17,047 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:17,048 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:17,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742159_1335 (size=675) 2024-12-09T13:33:17,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742159_1335 (size=675) 2024-12-09T13:33:17,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742159_1335 (size=675) 2024-12-09T13:33:17,063 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T13:33:17,067 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T13:33:17,068 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:17,069 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T13:33:17,069 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-09T13:33:17,071 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 251 msec 2024-12-09T13:33:17,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-09T13:33:17,133 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-09T13:33:17,135 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41968, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T13:33:17,135 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37274, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T13:33:17,135 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42010, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T13:33:17,137 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE 
=> 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T13:33:17,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:17,138 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T13:33:17,138 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:33:17,138 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 143 2024-12-09T13:33:17,139 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T13:33:17,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-09T13:33:17,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742160_1336 (size=399) 2024-12-09T13:33:17,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742160_1336 (size=399) 2024-12-09T13:33:17,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742160_1336 (size=399) 2024-12-09T13:33:17,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-09T13:33:17,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-09T13:33:17,484 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:17,484 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-12-09T13:33:17,485 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-09T13:33:17,547 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e6d6e668b7499b15a30493da1b169dae, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197136.e6d6e668b7499b15a30493da1b169dae.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:33:17,548 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => d2d53892ea44e719d7e3dbc6c23059b4, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733751197136.d2d53892ea44e719d7e3dbc6c23059b4.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:33:17,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742162_1338 (size=85) 2024-12-09T13:33:17,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742162_1338 (size=85) 2024-12-09T13:33:17,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742162_1338 (size=85) 2024-12-09T13:33:17,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742161_1337 (size=85) 2024-12-09T13:33:17,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742161_1337 (size=85) 2024-12-09T13:33:17,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742161_1337 (size=85) 2024-12-09T13:33:17,554 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733751197136.d2d53892ea44e719d7e3dbc6c23059b4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:33:17,554 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing d2d53892ea44e719d7e3dbc6c23059b4, disabling compactions & flushes 2024-12-09T13:33:17,554 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733751197136.d2d53892ea44e719d7e3dbc6c23059b4. 2024-12-09T13:33:17,554 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733751197136.d2d53892ea44e719d7e3dbc6c23059b4. 
2024-12-09T13:33:17,554 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733751197136.d2d53892ea44e719d7e3dbc6c23059b4. after waiting 0 ms 2024-12-09T13:33:17,554 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733751197136.d2d53892ea44e719d7e3dbc6c23059b4. 2024-12-09T13:33:17,554 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733751197136.d2d53892ea44e719d7e3dbc6c23059b4. 2024-12-09T13:33:17,554 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for d2d53892ea44e719d7e3dbc6c23059b4: Waiting for close lock at 1733751197554Disabling compacts and flushes for region at 1733751197554Disabling writes for close at 1733751197554Writing region close event to WAL at 1733751197554Closed at 1733751197554 2024-12-09T13:33:17,554 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197136.e6d6e668b7499b15a30493da1b169dae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:33:17,555 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing e6d6e668b7499b15a30493da1b169dae, disabling compactions & flushes 2024-12-09T13:33:17,555 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197136.e6d6e668b7499b15a30493da1b169dae. 2024-12-09T13:33:17,555 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197136.e6d6e668b7499b15a30493da1b169dae. 2024-12-09T13:33:17,555 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197136.e6d6e668b7499b15a30493da1b169dae. after waiting 0 ms 2024-12-09T13:33:17,555 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197136.e6d6e668b7499b15a30493da1b169dae. 2024-12-09T13:33:17,555 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197136.e6d6e668b7499b15a30493da1b169dae. 
2024-12-09T13:33:17,555 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for e6d6e668b7499b15a30493da1b169dae: Waiting for close lock at 1733751197555Disabling compacts and flushes for region at 1733751197555Disabling writes for close at 1733751197555Writing region close event to WAL at 1733751197555Closed at 1733751197555 2024-12-09T13:33:17,556 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T13:33:17,556 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733751197136.d2d53892ea44e719d7e3dbc6c23059b4.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733751197556"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733751197556"}]},"ts":"1733751197556"} 2024-12-09T13:33:17,556 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197136.e6d6e668b7499b15a30493da1b169dae.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733751197556"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733751197556"}]},"ts":"1733751197556"} 2024-12-09T13:33:17,558 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-09T13:33:17,558 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T13:33:17,559 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751197559"}]},"ts":"1733751197559"} 2024-12-09T13:33:17,560 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-12-09T13:33:17,560 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {c48e4b8eb196=0} racks are {/default-rack=0} 2024-12-09T13:33:17,561 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T13:33:17,561 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T13:33:17,561 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T13:33:17,561 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T13:33:17,561 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T13:33:17,561 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T13:33:17,561 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T13:33:17,561 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T13:33:17,561 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T13:33:17,561 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T13:33:17,561 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e6d6e668b7499b15a30493da1b169dae, ASSIGN}, {pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d2d53892ea44e719d7e3dbc6c23059b4, ASSIGN}] 2024-12-09T13:33:17,562 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d2d53892ea44e719d7e3dbc6c23059b4, ASSIGN 2024-12-09T13:33:17,562 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e6d6e668b7499b15a30493da1b169dae, ASSIGN 2024-12-09T13:33:17,563 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d2d53892ea44e719d7e3dbc6c23059b4, ASSIGN; state=OFFLINE, location=c48e4b8eb196,39211,1733750968235; forceNewPlan=false, retain=false 2024-12-09T13:33:17,563 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e6d6e668b7499b15a30493da1b169dae, ASSIGN; state=OFFLINE, location=c48e4b8eb196,44683,1733750968041; forceNewPlan=false, retain=false 2024-12-09T13:33:17,713 INFO [c48e4b8eb196:45049 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
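The create request logged above declares a single 'cf' family for testtb-testExportFileSystemStateWithMergeRegion-1, and the two regions initialized for it split the key space at '2' (STARTKEY '' to '2', and '2' to the end), which is what the two ASSIGN subprocedures (pid=144/145) now place. A hedged sketch of an equivalent client-side request follows; the family attributes are read off the descriptor dump above, while the class name and connection setup are assumed for illustration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                 // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
              .setBlocksize(65536)               // BLOCKSIZE => 64 KB
              .build())
          .build();
      // One split key yields exactly the two regions seen above: ['', '2') and ['2', '').
      admin.createTable(desc, new byte[][] { Bytes.toBytes("2") });
    }
  }
}
```

Passing the single split key is what produces the pair of regions that the assignment procedures open next.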
2024-12-09T13:33:17,714 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=d2d53892ea44e719d7e3dbc6c23059b4, regionState=OPENING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:33:17,714 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=e6d6e668b7499b15a30493da1b169dae, regionState=OPENING, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:33:17,716 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d2d53892ea44e719d7e3dbc6c23059b4, ASSIGN because future has completed 2024-12-09T13:33:17,716 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure d2d53892ea44e719d7e3dbc6c23059b4, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:33:17,717 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e6d6e668b7499b15a30493da1b169dae, ASSIGN because future has completed 2024-12-09T13:33:17,717 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=147, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure e6d6e668b7499b15a30493da1b169dae, server=c48e4b8eb196,44683,1733750968041}] 2024-12-09T13:33:17,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-09T13:33:17,871 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197136.e6d6e668b7499b15a30493da1b169dae. 2024-12-09T13:33:17,871 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1733751197136.d2d53892ea44e719d7e3dbc6c23059b4. 2024-12-09T13:33:17,871 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => d2d53892ea44e719d7e3dbc6c23059b4, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733751197136.d2d53892ea44e719d7e3dbc6c23059b4.', STARTKEY => '2', ENDKEY => ''} 2024-12-09T13:33:17,871 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7752): Opening region: {ENCODED => e6d6e668b7499b15a30493da1b169dae, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197136.e6d6e668b7499b15a30493da1b169dae.', STARTKEY => '', ENDKEY => '2'} 2024-12-09T13:33:17,871 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197136.e6d6e668b7499b15a30493da1b169dae. 
service=AccessControlService 2024-12-09T13:33:17,871 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733751197136.d2d53892ea44e719d7e3dbc6c23059b4. service=AccessControlService 2024-12-09T13:33:17,872 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T13:33:17,872 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T13:33:17,872 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 d2d53892ea44e719d7e3dbc6c23059b4 2024-12-09T13:33:17,872 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 e6d6e668b7499b15a30493da1b169dae 2024-12-09T13:33:17,872 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197136.e6d6e668b7499b15a30493da1b169dae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:33:17,872 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733751197136.d2d53892ea44e719d7e3dbc6c23059b4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:33:17,872 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for d2d53892ea44e719d7e3dbc6c23059b4 2024-12-09T13:33:17,872 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7794): checking encryption for e6d6e668b7499b15a30493da1b169dae 2024-12-09T13:33:17,872 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for d2d53892ea44e719d7e3dbc6c23059b4 2024-12-09T13:33:17,872 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7797): checking classloading for e6d6e668b7499b15a30493da1b169dae 2024-12-09T13:33:17,873 INFO [StoreOpener-e6d6e668b7499b15a30493da1b169dae-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e6d6e668b7499b15a30493da1b169dae 2024-12-09T13:33:17,875 INFO [StoreOpener-d2d53892ea44e719d7e3dbc6c23059b4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d2d53892ea44e719d7e3dbc6c23059b4 2024-12-09T13:33:17,876 INFO [StoreOpener-e6d6e668b7499b15a30493da1b169dae-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e6d6e668b7499b15a30493da1b169dae columnFamilyName cf 2024-12-09T13:33:17,876 DEBUG [StoreOpener-e6d6e668b7499b15a30493da1b169dae-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:33:17,877 INFO [StoreOpener-e6d6e668b7499b15a30493da1b169dae-1 {}] regionserver.HStore(327): Store=e6d6e668b7499b15a30493da1b169dae/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:33:17,877 INFO [StoreOpener-d2d53892ea44e719d7e3dbc6c23059b4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2d53892ea44e719d7e3dbc6c23059b4 columnFamilyName cf 2024-12-09T13:33:17,877 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1038): replaying wal for e6d6e668b7499b15a30493da1b169dae 2024-12-09T13:33:17,877 DEBUG [StoreOpener-d2d53892ea44e719d7e3dbc6c23059b4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:33:17,877 INFO [StoreOpener-d2d53892ea44e719d7e3dbc6c23059b4-1 {}] regionserver.HStore(327): Store=d2d53892ea44e719d7e3dbc6c23059b4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:33:17,877 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for d2d53892ea44e719d7e3dbc6c23059b4 2024-12-09T13:33:17,877 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e6d6e668b7499b15a30493da1b169dae 2024-12-09T13:33:17,878 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d2d53892ea44e719d7e3dbc6c23059b4 2024-12-09T13:33:17,878 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e6d6e668b7499b15a30493da1b169dae 2024-12-09T13:33:17,878 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d2d53892ea44e719d7e3dbc6c23059b4 2024-12-09T13:33:17,878 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1048): stopping wal replay for e6d6e668b7499b15a30493da1b169dae 2024-12-09T13:33:17,878 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1060): Cleaning up temporary data for e6d6e668b7499b15a30493da1b169dae 2024-12-09T13:33:17,878 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for d2d53892ea44e719d7e3dbc6c23059b4 2024-12-09T13:33:17,878 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for d2d53892ea44e719d7e3dbc6c23059b4 2024-12-09T13:33:17,879 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1093): writing seq id for d2d53892ea44e719d7e3dbc6c23059b4 2024-12-09T13:33:17,879 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1093): writing seq id for e6d6e668b7499b15a30493da1b169dae 2024-12-09T13:33:17,882 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d2d53892ea44e719d7e3dbc6c23059b4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:33:17,883 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1114): Opened d2d53892ea44e719d7e3dbc6c23059b4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66188560, jitterRate=-0.013713598251342773}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:33:17,884 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d2d53892ea44e719d7e3dbc6c23059b4 2024-12-09T13:33:17,884 DEBUG 
[RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for d2d53892ea44e719d7e3dbc6c23059b4: Running coprocessor pre-open hook at 1733751197872Writing region info on filesystem at 1733751197872Initializing all the Stores at 1733751197875 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751197875Cleaning up temporary data from old regions at 1733751197878 (+3 ms)Running coprocessor post-open hooks at 1733751197884 (+6 ms)Region opened successfully at 1733751197884 2024-12-09T13:33:17,885 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733751197136.d2d53892ea44e719d7e3dbc6c23059b4., pid=146, masterSystemTime=1733751197868 2024-12-09T13:33:17,887 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733751197136.d2d53892ea44e719d7e3dbc6c23059b4. 2024-12-09T13:33:17,887 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1733751197136.d2d53892ea44e719d7e3dbc6c23059b4. 2024-12-09T13:33:17,887 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e6d6e668b7499b15a30493da1b169dae/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:33:17,887 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=d2d53892ea44e719d7e3dbc6c23059b4, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:33:17,888 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1114): Opened e6d6e668b7499b15a30493da1b169dae; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69945581, jitterRate=0.04227037727832794}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:33:17,888 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e6d6e668b7499b15a30493da1b169dae 2024-12-09T13:33:17,888 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1006): Region open journal for e6d6e668b7499b15a30493da1b169dae: Running coprocessor pre-open hook at 1733751197872Writing region info on filesystem at 1733751197872Initializing all the Stores at 1733751197873 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => 
'0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751197873Cleaning up temporary data from old regions at 1733751197878 (+5 ms)Running coprocessor post-open hooks at 1733751197888 (+10 ms)Region opened successfully at 1733751197888 2024-12-09T13:33:17,891 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197136.e6d6e668b7499b15a30493da1b169dae., pid=147, masterSystemTime=1733751197868 2024-12-09T13:33:17,892 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure d2d53892ea44e719d7e3dbc6c23059b4, server=c48e4b8eb196,39211,1733750968235 because future has completed 2024-12-09T13:33:17,893 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197136.e6d6e668b7499b15a30493da1b169dae. 2024-12-09T13:33:17,893 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197136.e6d6e668b7499b15a30493da1b169dae. 2024-12-09T13:33:17,895 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=e6d6e668b7499b15a30493da1b169dae, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:33:17,896 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=145 2024-12-09T13:33:17,896 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=145, state=SUCCESS, hasLock=false; OpenRegionProcedure d2d53892ea44e719d7e3dbc6c23059b4, server=c48e4b8eb196,39211,1733750968235 in 177 msec 2024-12-09T13:33:17,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=147, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure e6d6e668b7499b15a30493da1b169dae, server=c48e4b8eb196,44683,1733750968041 because future has completed 2024-12-09T13:33:17,897 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d2d53892ea44e719d7e3dbc6c23059b4, ASSIGN in 335 msec 2024-12-09T13:33:17,898 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=147, resume processing ppid=144 2024-12-09T13:33:17,898 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure e6d6e668b7499b15a30493da1b169dae, server=c48e4b8eb196,44683,1733750968041 in 180 msec 2024-12-09T13:33:17,900 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=144, resume processing ppid=143 2024-12-09T13:33:17,900 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e6d6e668b7499b15a30493da1b169dae, ASSIGN in 337 msec 2024-12-09T13:33:17,901 INFO [PEWorker-5 {}] 
procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T13:33:17,901 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751197901"}]},"ts":"1733751197901"} 2024-12-09T13:33:17,902 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-12-09T13:33:17,903 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T13:33:17,903 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-12-09T13:33:17,908 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-09T13:33:17,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:17,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:17,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:17,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:17,977 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:17,977 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:17,977 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:17,977 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data 
PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:17,977 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:17,977 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:17,977 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:17,977 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:17,979 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 839 msec 2024-12-09T13:33:18,240 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-12-09T13:33:18,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-09T13:33:18,273 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-09T13:33:18,277 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197136.e6d6e668b7499b15a30493da1b169dae., hostname=c48e4b8eb196,44683,1733750968041, seqNum=2] 2024-12-09T13:33:18,282 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733751197136.d2d53892ea44e719d7e3dbc6c23059b4., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:33:18,284 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-12-09T13:33:18,298 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.2 merge regions [e6d6e668b7499b15a30493da1b169dae, d2d53892ea44e719d7e3dbc6c23059b4] 2024-12-09T13:33:18,303 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[e6d6e668b7499b15a30493da1b169dae, d2d53892ea44e719d7e3dbc6c23059b4], force=true 2024-12-09T13:33:18,304 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[e6d6e668b7499b15a30493da1b169dae, d2d53892ea44e719d7e3dbc6c23059b4], force=true 2024-12-09T13:33:18,304 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[e6d6e668b7499b15a30493da1b169dae, d2d53892ea44e719d7e3dbc6c23059b4], force=true 2024-12-09T13:33:18,304 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[e6d6e668b7499b15a30493da1b169dae, d2d53892ea44e719d7e3dbc6c23059b4], force=true 2024-12-09T13:33:18,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-09T13:33:18,309 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e6d6e668b7499b15a30493da1b169dae, UNASSIGN}, {pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d2d53892ea44e719d7e3dbc6c23059b4, UNASSIGN}] 2024-12-09T13:33:18,310 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e6d6e668b7499b15a30493da1b169dae, UNASSIGN 2024-12-09T13:33:18,310 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d2d53892ea44e719d7e3dbc6c23059b4, UNASSIGN 2024-12-09T13:33:18,311 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=d2d53892ea44e719d7e3dbc6c23059b4, regionState=CLOSING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:33:18,311 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=e6d6e668b7499b15a30493da1b169dae, regionState=CLOSING, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:33:18,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d2d53892ea44e719d7e3dbc6c23059b4, UNASSIGN because future has completed 
2024-12-09T13:33:18,313 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-09T13:33:18,313 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure d2d53892ea44e719d7e3dbc6c23059b4, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:33:18,313 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e6d6e668b7499b15a30493da1b169dae, UNASSIGN because future has completed 2024-12-09T13:33:18,313 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-09T13:33:18,314 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure e6d6e668b7499b15a30493da1b169dae, server=c48e4b8eb196,44683,1733750968041}] 2024-12-09T13:33:18,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-09T13:33:18,465 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(122): Close d2d53892ea44e719d7e3dbc6c23059b4 2024-12-09T13:33:18,465 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-09T13:33:18,465 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1722): Closing d2d53892ea44e719d7e3dbc6c23059b4, disabling compactions & flushes 2024-12-09T13:33:18,465 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733751197136.d2d53892ea44e719d7e3dbc6c23059b4. 2024-12-09T13:33:18,465 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733751197136.d2d53892ea44e719d7e3dbc6c23059b4. 2024-12-09T13:33:18,465 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733751197136.d2d53892ea44e719d7e3dbc6c23059b4. after waiting 0 ms 2024-12-09T13:33:18,465 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733751197136.d2d53892ea44e719d7e3dbc6c23059b4. 
2024-12-09T13:33:18,465 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(2902): Flushing d2d53892ea44e719d7e3dbc6c23059b4 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-09T13:33:18,466 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(122): Close e6d6e668b7499b15a30493da1b169dae 2024-12-09T13:33:18,466 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-09T13:33:18,466 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1722): Closing e6d6e668b7499b15a30493da1b169dae, disabling compactions & flushes 2024-12-09T13:33:18,466 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197136.e6d6e668b7499b15a30493da1b169dae. 2024-12-09T13:33:18,466 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197136.e6d6e668b7499b15a30493da1b169dae. 2024-12-09T13:33:18,466 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197136.e6d6e668b7499b15a30493da1b169dae. after waiting 0 ms 2024-12-09T13:33:18,466 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197136.e6d6e668b7499b15a30493da1b169dae. 
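Note on the flush entries above: each parent region persists a single ~24 B cell under family 'cf'. The test wrote one row ('1') into the first region and one row ('2') into the second shortly before requesting the merge, as the earlier locator lookups for row='1' and row='2' and the HFile keys '1/cf:' and '2/cf:' below indicate. A minimal client-side sketch of producing that state follows; the connection setup, cell values, and the empty column qualifier are assumptions for illustration and are not taken from this log (here the memstores are flushed automatically when the regions close for the merge, so the explicit flush call is optional).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteMergeRegionRows {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      // One cell per parent region: row "1" falls in the first region (''..'2'),
      // row "2" in the second ('2'..''). Empty qualifier and values are assumed.
      table.put(new Put(Bytes.toBytes("1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes(""), Bytes.toBytes("v1")));
      table.put(new Put(Bytes.toBytes("2"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes(""), Bytes.toBytes("v2")));
      // Optional explicit flush; in the log the flush happens as part of closing
      // the regions for the merge.
      admin.flush(tn);
    }
  }
}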
2024-12-09T13:33:18,466 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(2902): Flushing e6d6e668b7499b15a30493da1b169dae 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-09T13:33:18,479 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e6d6e668b7499b15a30493da1b169dae/.tmp/cf/2fd449deb41745589aec80d9c2c88310 is 28, key is 1/cf:/1733751198278/Put/seqid=0 2024-12-09T13:33:18,479 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d2d53892ea44e719d7e3dbc6c23059b4/.tmp/cf/03f259ed668a4d2ea7c04171d3138b64 is 28, key is 2/cf:/1733751198283/Put/seqid=0 2024-12-09T13:33:18,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742163_1339 (size=4945) 2024-12-09T13:33:18,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742163_1339 (size=4945) 2024-12-09T13:33:18,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742163_1339 (size=4945) 2024-12-09T13:33:18,488 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e6d6e668b7499b15a30493da1b169dae/.tmp/cf/2fd449deb41745589aec80d9c2c88310 2024-12-09T13:33:18,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742164_1340 (size=4945) 2024-12-09T13:33:18,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742164_1340 (size=4945) 2024-12-09T13:33:18,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742164_1340 (size=4945) 2024-12-09T13:33:18,491 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d2d53892ea44e719d7e3dbc6c23059b4/.tmp/cf/03f259ed668a4d2ea7c04171d3138b64 2024-12-09T13:33:18,493 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e6d6e668b7499b15a30493da1b169dae/.tmp/cf/2fd449deb41745589aec80d9c2c88310 as 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e6d6e668b7499b15a30493da1b169dae/cf/2fd449deb41745589aec80d9c2c88310 2024-12-09T13:33:18,496 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d2d53892ea44e719d7e3dbc6c23059b4/.tmp/cf/03f259ed668a4d2ea7c04171d3138b64 as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d2d53892ea44e719d7e3dbc6c23059b4/cf/03f259ed668a4d2ea7c04171d3138b64 2024-12-09T13:33:18,497 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e6d6e668b7499b15a30493da1b169dae/cf/2fd449deb41745589aec80d9c2c88310, entries=1, sequenceid=5, filesize=4.8 K 2024-12-09T13:33:18,498 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for e6d6e668b7499b15a30493da1b169dae in 32ms, sequenceid=5, compaction requested=false 2024-12-09T13:33:18,501 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d2d53892ea44e719d7e3dbc6c23059b4/cf/03f259ed668a4d2ea7c04171d3138b64, entries=1, sequenceid=5, filesize=4.8 K 2024-12-09T13:33:18,502 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for d2d53892ea44e719d7e3dbc6c23059b4 in 37ms, sequenceid=5, compaction requested=false 2024-12-09T13:33:18,510 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e6d6e668b7499b15a30493da1b169dae/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-09T13:33:18,510 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d2d53892ea44e719d7e3dbc6c23059b4/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-09T13:33:18,510 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:33:18,511 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:33:18,511 INFO 
[RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197136.e6d6e668b7499b15a30493da1b169dae. 2024-12-09T13:33:18,511 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733751197136.d2d53892ea44e719d7e3dbc6c23059b4. 2024-12-09T13:33:18,511 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1676): Region close journal for e6d6e668b7499b15a30493da1b169dae: Waiting for close lock at 1733751198466Running coprocessor pre-close hooks at 1733751198466Disabling compacts and flushes for region at 1733751198466Disabling writes for close at 1733751198466Obtaining lock to block concurrent updates at 1733751198466Preparing flush snapshotting stores in e6d6e668b7499b15a30493da1b169dae at 1733751198466Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197136.e6d6e668b7499b15a30493da1b169dae., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733751198466Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197136.e6d6e668b7499b15a30493da1b169dae. at 1733751198467 (+1 ms)Flushing e6d6e668b7499b15a30493da1b169dae/cf: creating writer at 1733751198467Flushing e6d6e668b7499b15a30493da1b169dae/cf: appending metadata at 1733751198479 (+12 ms)Flushing e6d6e668b7499b15a30493da1b169dae/cf: closing flushed file at 1733751198479Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32c18319: reopening flushed file at 1733751198492 (+13 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for e6d6e668b7499b15a30493da1b169dae in 32ms, sequenceid=5, compaction requested=false at 1733751198498 (+6 ms)Writing region close event to WAL at 1733751198499 (+1 ms)Running coprocessor post-close hooks at 1733751198510 (+11 ms)Closed at 1733751198511 (+1 ms) 2024-12-09T13:33:18,511 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1676): Region close journal for d2d53892ea44e719d7e3dbc6c23059b4: Waiting for close lock at 1733751198465Running coprocessor pre-close hooks at 1733751198465Disabling compacts and flushes for region at 1733751198465Disabling writes for close at 1733751198465Obtaining lock to block concurrent updates at 1733751198465Preparing flush snapshotting stores in d2d53892ea44e719d7e3dbc6c23059b4 at 1733751198466 (+1 ms)Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1733751197136.d2d53892ea44e719d7e3dbc6c23059b4., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733751198466Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1733751197136.d2d53892ea44e719d7e3dbc6c23059b4. 
at 1733751198466Flushing d2d53892ea44e719d7e3dbc6c23059b4/cf: creating writer at 1733751198466Flushing d2d53892ea44e719d7e3dbc6c23059b4/cf: appending metadata at 1733751198479 (+13 ms)Flushing d2d53892ea44e719d7e3dbc6c23059b4/cf: closing flushed file at 1733751198479Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@426f7bb8: reopening flushed file at 1733751198495 (+16 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for d2d53892ea44e719d7e3dbc6c23059b4 in 37ms, sequenceid=5, compaction requested=false at 1733751198502 (+7 ms)Writing region close event to WAL at 1733751198503 (+1 ms)Running coprocessor post-close hooks at 1733751198511 (+8 ms)Closed at 1733751198511 2024-12-09T13:33:18,513 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(157): Closed d2d53892ea44e719d7e3dbc6c23059b4 2024-12-09T13:33:18,514 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=d2d53892ea44e719d7e3dbc6c23059b4, regionState=CLOSED 2024-12-09T13:33:18,515 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=e6d6e668b7499b15a30493da1b169dae, regionState=CLOSED 2024-12-09T13:33:18,515 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(157): Closed e6d6e668b7499b15a30493da1b169dae 2024-12-09T13:33:18,517 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=151, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure d2d53892ea44e719d7e3dbc6c23059b4, server=c48e4b8eb196,39211,1733750968235 because future has completed 2024-12-09T13:33:18,518 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=152, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure e6d6e668b7499b15a30493da1b169dae, server=c48e4b8eb196,44683,1733750968041 because future has completed 2024-12-09T13:33:18,520 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=150 2024-12-09T13:33:18,521 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=152, resume processing ppid=149 2024-12-09T13:33:18,521 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=150, state=SUCCESS, hasLock=false; CloseRegionProcedure d2d53892ea44e719d7e3dbc6c23059b4, server=c48e4b8eb196,39211,1733750968235 in 205 msec 2024-12-09T13:33:18,521 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=149, state=SUCCESS, hasLock=false; CloseRegionProcedure e6d6e668b7499b15a30493da1b169dae, server=c48e4b8eb196,44683,1733750968041 in 206 msec 2024-12-09T13:33:18,521 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d2d53892ea44e719d7e3dbc6c23059b4, UNASSIGN in 211 msec 2024-12-09T13:33:18,523 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=149, resume processing ppid=148 2024-12-09T13:33:18,523 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e6d6e668b7499b15a30493da1b169dae, UNASSIGN in 212 
msec 2024-12-09T13:33:18,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742165_1341 (size=84) 2024-12-09T13:33:18,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742165_1341 (size=84) 2024-12-09T13:33:18,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742165_1341 (size=84) 2024-12-09T13:33:18,541 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:33:18,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742166_1342 (size=20) 2024-12-09T13:33:18,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742166_1342 (size=20) 2024-12-09T13:33:18,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742166_1342 (size=20) 2024-12-09T13:33:18,555 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:33:18,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742167_1343 (size=21) 2024-12-09T13:33:18,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742167_1343 (size=21) 2024-12-09T13:33:18,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742167_1343 (size=21) 2024-12-09T13:33:18,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742168_1344 (size=84) 2024-12-09T13:33:18,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742168_1344 (size=84) 2024-12-09T13:33:18,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742168_1344 (size=84) 2024-12-09T13:33:18,579 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:33:18,587 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/941fdb94a7ed0c6a947cad68d03d6584/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-12-09T13:33:18,589 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197136.e6d6e668b7499b15a30493da1b169dae.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-09T13:33:18,589 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete 
{"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733751197136.d2d53892ea44e719d7e3dbc6c23059b4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-09T13:33:18,589 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197137.941fdb94a7ed0c6a947cad68d03d6584.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-09T13:33:18,594 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=941fdb94a7ed0c6a947cad68d03d6584, ASSIGN}] 2024-12-09T13:33:18,595 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=941fdb94a7ed0c6a947cad68d03d6584, ASSIGN 2024-12-09T13:33:18,596 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=941fdb94a7ed0c6a947cad68d03d6584, ASSIGN; state=MERGED, location=c48e4b8eb196,44683,1733750968041; forceNewPlan=false, retain=false 2024-12-09T13:33:18,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-09T13:33:18,746 INFO [c48e4b8eb196:45049 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-09T13:33:18,747 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=941fdb94a7ed0c6a947cad68d03d6584, regionState=OPENING, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:33:18,749 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=941fdb94a7ed0c6a947cad68d03d6584, ASSIGN because future has completed 2024-12-09T13:33:18,749 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure 941fdb94a7ed0c6a947cad68d03d6584, server=c48e4b8eb196,44683,1733750968041}] 2024-12-09T13:33:18,905 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197137.941fdb94a7ed0c6a947cad68d03d6584. 
2024-12-09T13:33:18,905 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7752): Opening region: {ENCODED => 941fdb94a7ed0c6a947cad68d03d6584, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197137.941fdb94a7ed0c6a947cad68d03d6584.', STARTKEY => '', ENDKEY => ''} 2024-12-09T13:33:18,905 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197137.941fdb94a7ed0c6a947cad68d03d6584. service=AccessControlService 2024-12-09T13:33:18,906 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T13:33:18,906 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 941fdb94a7ed0c6a947cad68d03d6584 2024-12-09T13:33:18,906 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197137.941fdb94a7ed0c6a947cad68d03d6584.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:33:18,906 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7794): checking encryption for 941fdb94a7ed0c6a947cad68d03d6584 2024-12-09T13:33:18,906 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7797): checking classloading for 941fdb94a7ed0c6a947cad68d03d6584 2024-12-09T13:33:18,908 INFO [StoreOpener-941fdb94a7ed0c6a947cad68d03d6584-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 941fdb94a7ed0c6a947cad68d03d6584 2024-12-09T13:33:18,909 INFO [StoreOpener-941fdb94a7ed0c6a947cad68d03d6584-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 941fdb94a7ed0c6a947cad68d03d6584 columnFamilyName cf 2024-12-09T13:33:18,909 DEBUG [StoreOpener-941fdb94a7ed0c6a947cad68d03d6584-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:33:18,918 DEBUG [StoreOpener-941fdb94a7ed0c6a947cad68d03d6584-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/941fdb94a7ed0c6a947cad68d03d6584/cf/03f259ed668a4d2ea7c04171d3138b64.d2d53892ea44e719d7e3dbc6c23059b4->hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d2d53892ea44e719d7e3dbc6c23059b4/cf/03f259ed668a4d2ea7c04171d3138b64-top 2024-12-09T13:33:18,924 DEBUG [StoreOpener-941fdb94a7ed0c6a947cad68d03d6584-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/941fdb94a7ed0c6a947cad68d03d6584/cf/2fd449deb41745589aec80d9c2c88310.e6d6e668b7499b15a30493da1b169dae->hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e6d6e668b7499b15a30493da1b169dae/cf/2fd449deb41745589aec80d9c2c88310-top 2024-12-09T13:33:18,925 INFO [StoreOpener-941fdb94a7ed0c6a947cad68d03d6584-1 {}] regionserver.HStore(327): Store=941fdb94a7ed0c6a947cad68d03d6584/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:33:18,925 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1038): replaying wal for 941fdb94a7ed0c6a947cad68d03d6584 2024-12-09T13:33:18,926 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/941fdb94a7ed0c6a947cad68d03d6584 2024-12-09T13:33:18,928 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/941fdb94a7ed0c6a947cad68d03d6584 2024-12-09T13:33:18,928 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1048): stopping wal replay for 941fdb94a7ed0c6a947cad68d03d6584 2024-12-09T13:33:18,928 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1060): Cleaning up temporary data for 941fdb94a7ed0c6a947cad68d03d6584 2024-12-09T13:33:18,930 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1093): writing seq id for 941fdb94a7ed0c6a947cad68d03d6584 2024-12-09T13:33:18,931 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1114): Opened 941fdb94a7ed0c6a947cad68d03d6584; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62249335, jitterRate=-0.07241262495517731}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:33:18,931 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 941fdb94a7ed0c6a947cad68d03d6584 2024-12-09T13:33:18,931 DEBUG 
[RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1006): Region open journal for 941fdb94a7ed0c6a947cad68d03d6584: Running coprocessor pre-open hook at 1733751198906Writing region info on filesystem at 1733751198906Initializing all the Stores at 1733751198907 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751198907Cleaning up temporary data from old regions at 1733751198928 (+21 ms)Running coprocessor post-open hooks at 1733751198931 (+3 ms)Region opened successfully at 1733751198931 2024-12-09T13:33:18,932 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197137.941fdb94a7ed0c6a947cad68d03d6584., pid=154, masterSystemTime=1733751198901 2024-12-09T13:33:18,932 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197137.941fdb94a7ed0c6a947cad68d03d6584.,because compaction is disabled. 2024-12-09T13:33:18,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-09T13:33:18,934 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197137.941fdb94a7ed0c6a947cad68d03d6584. 2024-12-09T13:33:18,934 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197137.941fdb94a7ed0c6a947cad68d03d6584. 
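The entries above trace MergeTableRegionsProcedure pid=148 end to end: the client's forcible merge request for regions e6d6e668b7499b15a30493da1b169dae and d2d53892ea44e719d7e3dbc6c23059b4, the UNASSIGN/close and flush of both parents, and the open of the merged region 941fdb94a7ed0c6a947cad68d03d6584, whose store loads reference ('-top') files pointing back at the parents' HFiles. A hedged sketch of how such a merge is typically issued through the client Admin API follows; the connection setup and the way the two region names are obtained are assumptions, not taken from this log.

import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class MergeAdjacentRegions {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Fetch the table's regions and merge the first two by encoded region name.
      List<RegionInfo> regions = admin.getRegions(tn);
      byte[][] toMerge = new byte[][] {
          regions.get(0).getEncodedNameAsBytes(),
          regions.get(1).getEncodedNameAsBytes()
      };
      // forcible=true mirrors the force=true flag recorded for pid=148; the returned
      // Future completes once the MergeTableRegionsProcedure finishes on the master.
      admin.mergeRegionsAsync(toMerge, true).get();
    }
  }
}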
2024-12-09T13:33:18,934 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=941fdb94a7ed0c6a947cad68d03d6584, regionState=OPEN, openSeqNum=9, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:33:18,936 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure 941fdb94a7ed0c6a947cad68d03d6584, server=c48e4b8eb196,44683,1733750968041 because future has completed 2024-12-09T13:33:18,938 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=153 2024-12-09T13:33:18,938 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=153, state=SUCCESS, hasLock=false; OpenRegionProcedure 941fdb94a7ed0c6a947cad68d03d6584, server=c48e4b8eb196,44683,1733750968041 in 188 msec 2024-12-09T13:33:18,940 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=153, resume processing ppid=148 2024-12-09T13:33:18,940 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=941fdb94a7ed0c6a947cad68d03d6584, ASSIGN in 344 msec 2024-12-09T13:33:18,941 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[e6d6e668b7499b15a30493da1b169dae, d2d53892ea44e719d7e3dbc6c23059b4], force=true in 640 msec 2024-12-09T13:33:19,398 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0006_000001 (auth:SIMPLE) from 127.0.0.1:41940 2024-12-09T13:33:19,405 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_0/usercache/jenkins/appcache/application_1733750974572_0006/container_1733750974572_0006_01_000001/launch_container.sh] 2024-12-09T13:33:19,405 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_0/usercache/jenkins/appcache/application_1733750974572_0006/container_1733750974572_0006_01_000001/container_tokens] 2024-12-09T13:33:19,405 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_0/usercache/jenkins/appcache/application_1733750974572_0006/container_1733750974572_0006_01_000001/sysfs] 2024-12-09T13:33:19,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-09T13:33:19,443 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-09T13:33:19,444 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-09T13:33:19,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733751199444 (current time:1733751199444). 2024-12-09T13:33:19,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T13:33:19,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-12-09T13:33:19,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T13:33:19,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cc177f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:19,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:33:19,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:33:19,446 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:33:19,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:33:19,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:33:19,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30944755, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:19,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:33:19,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:33:19,447 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:19,447 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38332, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:33:19,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25b24441, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:19,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:33:19,449 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:33:19,449 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:33:19,450 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42026, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:33:19,451 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049. 2024-12-09T13:33:19,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:33:19,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:19,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:19,451 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T13:33:19,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50ceb97d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:19,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:33:19,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:33:19,453 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:33:19,453 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:33:19,453 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:33:19,453 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e2af103, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:19,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:33:19,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:33:19,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:19,455 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38348, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:33:19,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c3b9eec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:19,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:33:19,457 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:33:19,457 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:33:19,458 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42042, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
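The surrounding entries show the master validating the snapshot request { ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }: it opens short-lived connections to read the table's ACL entry (jenkins: RWXCA) so the permissions can be written into the snapshot description, then registers SnapshotProcedure pid=155. On the client side, a FLUSH-type snapshot like this can be requested with a single Admin call; the sketch below is illustrative and its connection setup is assumed.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the snapshot procedure (pid=155 in this log) completes.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"),
          SnapshotType.FLUSH);
    }
  }
}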
2024-12-09T13:33:19,459 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:33:19,461 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049. 2024-12-09T13:33:19,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:33:19,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:19,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:19,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] access.PermissionStorage(613): Read acl: 
entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-09T13:33:19,461 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:33:19,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T13:33:19,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-09T13:33:19,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-12-09T13:33:19,464 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T13:33:19,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-09T13:33:19,465 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T13:33:19,467 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T13:33:19,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742169_1345 (size=216) 2024-12-09T13:33:19,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742169_1345 (size=216) 2024-12-09T13:33:19,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742169_1345 (size=216) 2024-12-09T13:33:19,473 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T13:33:19,473 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
941fdb94a7ed0c6a947cad68d03d6584}] 2024-12-09T13:33:19,474 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 941fdb94a7ed0c6a947cad68d03d6584 2024-12-09T13:33:19,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-09T13:33:19,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44683 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=156 2024-12-09T13:33:19,626 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197137.941fdb94a7ed0c6a947cad68d03d6584. 2024-12-09T13:33:19,626 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.HRegion(2603): Flush status journal for 941fdb94a7ed0c6a947cad68d03d6584: 2024-12-09T13:33:19,626 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197137.941fdb94a7ed0c6a947cad68d03d6584. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-12-09T13:33:19,627 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197137.941fdb94a7ed0c6a947cad68d03d6584.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:19,627 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:33:19,627 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/941fdb94a7ed0c6a947cad68d03d6584/cf/03f259ed668a4d2ea7c04171d3138b64.d2d53892ea44e719d7e3dbc6c23059b4->hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d2d53892ea44e719d7e3dbc6c23059b4/cf/03f259ed668a4d2ea7c04171d3138b64-top, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/941fdb94a7ed0c6a947cad68d03d6584/cf/2fd449deb41745589aec80d9c2c88310.e6d6e668b7499b15a30493da1b169dae->hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e6d6e668b7499b15a30493da1b169dae/cf/2fd449deb41745589aec80d9c2c88310-top] hfiles 2024-12-09T13:33:19,627 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/941fdb94a7ed0c6a947cad68d03d6584/cf/03f259ed668a4d2ea7c04171d3138b64.d2d53892ea44e719d7e3dbc6c23059b4 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:19,628 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/941fdb94a7ed0c6a947cad68d03d6584/cf/2fd449deb41745589aec80d9c2c88310.e6d6e668b7499b15a30493da1b169dae for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:19,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742170_1346 (size=269) 2024-12-09T13:33:19,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742170_1346 (size=269) 2024-12-09T13:33:19,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742170_1346 (size=269) 2024-12-09T13:33:19,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197137.941fdb94a7ed0c6a947cad68d03d6584. 2024-12-09T13:33:19,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-12-09T13:33:19,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=156 2024-12-09T13:33:19,647 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 941fdb94a7ed0c6a947cad68d03d6584 2024-12-09T13:33:19,647 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 941fdb94a7ed0c6a947cad68d03d6584 2024-12-09T13:33:19,651 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=156, resume processing ppid=155 2024-12-09T13:33:19,651 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T13:33:19,651 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, ppid=155, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 941fdb94a7ed0c6a947cad68d03d6584 in 175 msec 2024-12-09T13:33:19,652 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 
} execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T13:33:19,653 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T13:33:19,653 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:19,654 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:19,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742171_1347 (size=670) 2024-12-09T13:33:19,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742171_1347 (size=670) 2024-12-09T13:33:19,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742171_1347 (size=670) 2024-12-09T13:33:19,681 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T13:33:19,689 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T13:33:19,690 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:19,692 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T13:33:19,692 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-12-09T13:33:19,694 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 
table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 230 msec 2024-12-09T13:33:19,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-09T13:33:19,783 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-09T13:33:19,784 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751199783 2024-12-09T13:33:19,784 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:45819, tgtDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751199783, rawTgtDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751199783, srcFsUri=hdfs://localhost:45819, srcDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:33:19,809 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:45819, inputRoot=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:33:19,809 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751199783, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751199783/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:19,811 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
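[Editor's note, illustrative only] The SnapshotProcedure trace above (pid=155) and the ExportSnapshot setup that follows correspond to a FLUSH-type snapshot being taken through the Admin API and then copied to another HDFS location. A minimal sketch of that client-side flow, not code from the test itself: the class name and the --copy-to URI are placeholders, and only the snapshot/table names from the log are reused.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class SnapshotExportSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table =
            TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Drives the SnapshotProcedure states logged above
          // (SNAPSHOT_PREPARE .. SNAPSHOT_COMPLETE_SNAPSHOT).
          admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1", table);
        }
        // Copies the snapshot manifest and referenced hfiles to the target
        // filesystem, which is what the ExportSnapshot entries below record.
        // The --copy-to URI here is a placeholder, not the test's actual path.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "--snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
            "--copy-to", "hdfs://namenode:8020/some/export/destination"
        });
        System.exit(rc);
      }
    }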
2024-12-09T13:33:19,815 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751199783/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:19,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742173_1349 (size=670) 2024-12-09T13:33:19,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742173_1349 (size=670) 2024-12-09T13:33:19,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742173_1349 (size=670) 2024-12-09T13:33:19,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742172_1348 (size=216) 2024-12-09T13:33:19,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742172_1348 (size=216) 2024-12-09T13:33:19,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742172_1348 (size=216) 2024-12-09T13:33:19,834 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:33:19,835 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:33:19,835 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:33:20,782 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop-4822416528113659700.jar 2024-12-09T13:33:20,782 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:33:20,782 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:33:20,838 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop-4640831134102103908.jar 2024-12-09T13:33:20,838 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:33:20,838 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:33:20,839 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:33:20,839 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:33:20,839 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:33:20,839 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:33:20,839 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-09T13:33:20,840 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-09T13:33:20,840 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-09T13:33:20,840 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-09T13:33:20,840 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-09T13:33:20,840 
DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-09T13:33:20,841 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-09T13:33:20,841 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-09T13:33:20,841 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-09T13:33:20,841 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-09T13:33:20,841 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-09T13:33:20,841 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:33:20,842 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:33:20,842 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:33:20,842 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:33:20,842 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:33:20,842 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:33:20,842 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:33:20,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742174_1350 (size=5175431) 2024-12-09T13:33:20,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742174_1350 (size=5175431) 2024-12-09T13:33:20,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742174_1350 (size=5175431) 2024-12-09T13:33:20,929 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T13:33:20,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742175_1351 (size=1877034) 2024-12-09T13:33:20,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742175_1351 (size=1877034) 2024-12-09T13:33:20,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742175_1351 (size=1877034) 2024-12-09T13:33:20,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742176_1352 (size=20406) 2024-12-09T13:33:20,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742176_1352 (size=20406) 2024-12-09T13:33:20,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742176_1352 (size=20406) 2024-12-09T13:33:20,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742177_1353 (size=24096) 2024-12-09T13:33:20,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742177_1353 (size=24096) 2024-12-09T13:33:20,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742177_1353 (size=24096) 2024-12-09T13:33:20,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742178_1354 (size=503880) 2024-12-09T13:33:20,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742178_1354 (size=503880) 2024-12-09T13:33:20,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742178_1354 (size=503880) 2024-12-09T13:33:21,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742179_1355 (size=4695811) 
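[Editor's note, illustrative only] The TableMapReduceUtil(972) entries above record, for each class the export job depends on, which jar will be shipped with the job; the BLOCK* addStoredBlock entries around here are those jars being written into HDFS for the distributed cache. A minimal sketch of the call that produces that resolution, assuming an already-configured MapReduce Job; the wrapper class and method names are illustrative, not from the test.

    import java.io.IOException;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      // Resolves the jar providing each of the job's key classes (the
      // "For class X, using jar Y" lines above) and adds it to the job's
      // distributed cache so the map tasks can load those classes.
      static void shipDependencyJars(Job job) throws IOException {
        TableMapReduceUtil.addDependencyJars(job);
      }
    }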
2024-12-09T13:33:21,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742179_1355 (size=4695811) 2024-12-09T13:33:21,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742179_1355 (size=4695811) 2024-12-09T13:33:21,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742180_1356 (size=111872) 2024-12-09T13:33:21,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742180_1356 (size=111872) 2024-12-09T13:33:21,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742180_1356 (size=111872) 2024-12-09T13:33:21,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742181_1357 (size=1597213) 2024-12-09T13:33:21,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742181_1357 (size=1597213) 2024-12-09T13:33:21,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742181_1357 (size=1597213) 2024-12-09T13:33:21,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742182_1358 (size=4188619) 2024-12-09T13:33:21,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742182_1358 (size=4188619) 2024-12-09T13:33:21,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742182_1358 (size=4188619) 2024-12-09T13:33:21,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742183_1359 (size=127628) 2024-12-09T13:33:21,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742183_1359 (size=127628) 2024-12-09T13:33:21,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742183_1359 (size=127628) 2024-12-09T13:33:21,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742184_1360 (size=29229) 2024-12-09T13:33:21,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742184_1360 (size=29229) 2024-12-09T13:33:21,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742184_1360 (size=29229) 2024-12-09T13:33:21,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742185_1361 (size=45609) 2024-12-09T13:33:21,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742185_1361 (size=45609) 2024-12-09T13:33:21,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742185_1361 
(size=45609) 2024-12-09T13:33:21,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742186_1362 (size=8360360) 2024-12-09T13:33:21,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742186_1362 (size=8360360) 2024-12-09T13:33:21,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742186_1362 (size=8360360) 2024-12-09T13:33:21,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742187_1363 (size=443172) 2024-12-09T13:33:21,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742187_1363 (size=443172) 2024-12-09T13:33:21,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742187_1363 (size=443172) 2024-12-09T13:33:21,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742188_1364 (size=1323991) 2024-12-09T13:33:21,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742188_1364 (size=1323991) 2024-12-09T13:33:21,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742188_1364 (size=1323991) 2024-12-09T13:33:21,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742189_1365 (size=232957) 2024-12-09T13:33:21,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742189_1365 (size=232957) 2024-12-09T13:33:21,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742189_1365 (size=232957) 2024-12-09T13:33:21,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742190_1366 (size=217634) 2024-12-09T13:33:21,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742190_1366 (size=217634) 2024-12-09T13:33:21,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742190_1366 (size=217634) 2024-12-09T13:33:21,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742191_1367 (size=903930) 2024-12-09T13:33:21,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742191_1367 (size=903930) 2024-12-09T13:33:21,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742191_1367 (size=903930) 2024-12-09T13:33:21,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742192_1368 (size=77835) 2024-12-09T13:33:21,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to 
blk_1073742192_1368 (size=77835) 2024-12-09T13:33:21,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742192_1368 (size=77835) 2024-12-09T13:33:21,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742193_1369 (size=1832290) 2024-12-09T13:33:21,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742193_1369 (size=1832290) 2024-12-09T13:33:21,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742193_1369 (size=1832290) 2024-12-09T13:33:21,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742194_1370 (size=30949) 2024-12-09T13:33:21,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742194_1370 (size=30949) 2024-12-09T13:33:21,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742194_1370 (size=30949) 2024-12-09T13:33:21,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742195_1371 (size=136454) 2024-12-09T13:33:21,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742195_1371 (size=136454) 2024-12-09T13:33:21,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742195_1371 (size=136454) 2024-12-09T13:33:21,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742196_1372 (size=322274) 2024-12-09T13:33:21,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742196_1372 (size=322274) 2024-12-09T13:33:21,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742196_1372 (size=322274) 2024-12-09T13:33:21,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742197_1373 (size=131440) 2024-12-09T13:33:21,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742197_1373 (size=131440) 2024-12-09T13:33:21,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742197_1373 (size=131440) 2024-12-09T13:33:21,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742198_1374 (size=6425017) 2024-12-09T13:33:21,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742198_1374 (size=6425017) 2024-12-09T13:33:21,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742198_1374 (size=6425017) 2024-12-09T13:33:21,636 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. 
User classes may not be found. See Job or Job#setJar(String). 2024-12-09T13:33:21,638 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-12-09T13:33:21,641 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=4.8 K 2024-12-09T13:33:21,641 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=4.8 K 2024-12-09T13:33:21,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742199_1375 (size=481) 2024-12-09T13:33:21,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742199_1375 (size=481) 2024-12-09T13:33:21,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742199_1375 (size=481) 2024-12-09T13:33:21,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742200_1376 (size=21) 2024-12-09T13:33:21,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742200_1376 (size=21) 2024-12-09T13:33:21,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742200_1376 (size=21) 2024-12-09T13:33:21,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742201_1377 (size=304159) 2024-12-09T13:33:21,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742201_1377 (size=304159) 2024-12-09T13:33:21,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742201_1377 (size=304159) 2024-12-09T13:33:22,121 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T13:33:22,121 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T13:33:22,295 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0007_000001 (auth:SIMPLE) from 127.0.0.1:52750 2024-12-09T13:33:26,021 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-09T13:33:27,385 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0007_000001 (auth:SIMPLE) from 127.0.0.1:46580 2024-12-09T13:33:27,484 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:27,484 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-12-09T13:33:27,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742202_1378 (size=349857) 2024-12-09T13:33:27,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742202_1378 (size=349857) 2024-12-09T13:33:27,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742202_1378 (size=349857) 2024-12-09T13:33:29,629 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0007_000001 (auth:SIMPLE) from 127.0.0.1:59576 2024-12-09T13:33:29,629 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0007_000001 (auth:SIMPLE) from 127.0.0.1:52758 2024-12-09T13:33:31,034 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1bcbe3770b02c0b22c3abe057c6ed82d changed from -1.0 to 0.0, refreshing cache 2024-12-09T13:33:31,034 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region ebf7c4bb66d6a538b64ca52a27de834f changed from -1.0 to 0.0, refreshing cache 2024-12-09T13:33:32,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742203_1379 (size=4945) 2024-12-09T13:33:32,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742203_1379 (size=4945) 2024-12-09T13:33:32,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742203_1379 (size=4945) 2024-12-09T13:33:32,830 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_1/usercache/jenkins/appcache/application_1733750974572_0007/container_1733750974572_0007_01_000002/launch_container.sh] 2024-12-09T13:33:32,830 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_1/usercache/jenkins/appcache/application_1733750974572_0007/container_1733750974572_0007_01_000002/container_tokens] 2024-12-09T13:33:32,830 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_1/usercache/jenkins/appcache/application_1733750974572_0007/container_1733750974572_0007_01_000002/sysfs] 2024-12-09T13:33:33,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742205_1381 (size=4945) 2024-12-09T13:33:33,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742205_1381 (size=4945) 2024-12-09T13:33:33,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742205_1381 (size=4945) 2024-12-09T13:33:33,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742204_1380 (size=22243) 2024-12-09T13:33:33,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742204_1380 (size=22243) 2024-12-09T13:33:33,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742204_1380 (size=22243) 2024-12-09T13:33:33,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742206_1382 (size=482) 2024-12-09T13:33:33,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742206_1382 (size=482) 2024-12-09T13:33:33,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742206_1382 (size=482) 2024-12-09T13:33:33,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742207_1383 (size=22243) 2024-12-09T13:33:33,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742207_1383 (size=22243) 2024-12-09T13:33:33,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742207_1383 (size=22243) 2024-12-09T13:33:33,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742208_1384 (size=349857) 2024-12-09T13:33:33,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742208_1384 (size=349857) 2024-12-09T13:33:33,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742208_1384 (size=349857) 2024-12-09T13:33:33,309 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0007_000001 (auth:SIMPLE) from 127.0.0.1:45794 2024-12-09T13:33:33,319 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0007_000001 (auth:SIMPLE) from 127.0.0.1:53138 2024-12-09T13:33:33,329 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_1/usercache/jenkins/appcache/application_1733750974572_0007/container_1733750974572_0007_01_000003/launch_container.sh] 2024-12-09T13:33:33,329 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_1/usercache/jenkins/appcache/application_1733750974572_0007/container_1733750974572_0007_01_000003/container_tokens] 2024-12-09T13:33:33,329 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_1/usercache/jenkins/appcache/application_1733750974572_0007/container_1733750974572_0007_01_000003/sysfs] 2024-12-09T13:33:35,254 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-09T13:33:35,255 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-09T13:33:35,260 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:35,261 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-09T13:33:35,261 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-09T13:33:35,261 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:35,262 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-09T13:33:35,262 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-09T13:33:35,262 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751199783/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751199783/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:35,262 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751199783/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-09T13:33:35,262 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751199783/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-09T13:33:35,269 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:35,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:35,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-09T13:33:35,272 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751215272"}]},"ts":"1733751215272"} 2024-12-09T13:33:35,274 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-12-09T13:33:35,274 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-12-09T13:33:35,275 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-12-09T13:33:35,276 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=941fdb94a7ed0c6a947cad68d03d6584, UNASSIGN}] 2024-12-09T13:33:35,277 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=941fdb94a7ed0c6a947cad68d03d6584, UNASSIGN 2024-12-09T13:33:35,278 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=941fdb94a7ed0c6a947cad68d03d6584, regionState=CLOSING, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:33:35,279 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=941fdb94a7ed0c6a947cad68d03d6584, UNASSIGN because future has completed 2024-12-09T13:33:35,279 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:33:35,279 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE, 
hasLock=false; CloseRegionProcedure 941fdb94a7ed0c6a947cad68d03d6584, server=c48e4b8eb196,44683,1733750968041}] 2024-12-09T13:33:35,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-09T13:33:35,432 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(122): Close 941fdb94a7ed0c6a947cad68d03d6584 2024-12-09T13:33:35,432 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:33:35,432 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1722): Closing 941fdb94a7ed0c6a947cad68d03d6584, disabling compactions & flushes 2024-12-09T13:33:35,432 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197137.941fdb94a7ed0c6a947cad68d03d6584. 2024-12-09T13:33:35,432 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197137.941fdb94a7ed0c6a947cad68d03d6584. 2024-12-09T13:33:35,432 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197137.941fdb94a7ed0c6a947cad68d03d6584. after waiting 0 ms 2024-12-09T13:33:35,432 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197137.941fdb94a7ed0c6a947cad68d03d6584. 2024-12-09T13:33:35,436 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/941fdb94a7ed0c6a947cad68d03d6584/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-12-09T13:33:35,436 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:33:35,436 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197137.941fdb94a7ed0c6a947cad68d03d6584. 
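[Editor's note, illustrative only] The DisableTableProcedure trace that begins above (pid=157, with the region close at pid=160) and the DeleteTableProcedure entries that follow (pid=161, ACL removal and HFile archiving) are the server-side effect of a plain disable-then-delete issued by the test client. A minimal sketch of those Admin calls, assuming the same table name; the class name is a placeholder.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TableTeardownSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table =
            TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Unassigns and closes the table's regions
          // (DisableTableProcedure / CloseRegionProcedure above).
          admin.disableTable(table);
          // Removes permissions, archives the HFiles and deletes the table's
          // data and meta rows (DeleteTableProcedure entries below).
          admin.deleteTable(table);
        }
      }
    }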
2024-12-09T13:33:35,436 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1676): Region close journal for 941fdb94a7ed0c6a947cad68d03d6584: Waiting for close lock at 1733751215432Running coprocessor pre-close hooks at 1733751215432Disabling compacts and flushes for region at 1733751215432Disabling writes for close at 1733751215432Writing region close event to WAL at 1733751215433 (+1 ms)Running coprocessor post-close hooks at 1733751215436 (+3 ms)Closed at 1733751215436 2024-12-09T13:33:35,438 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(157): Closed 941fdb94a7ed0c6a947cad68d03d6584 2024-12-09T13:33:35,438 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=941fdb94a7ed0c6a947cad68d03d6584, regionState=CLOSED 2024-12-09T13:33:35,440 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure 941fdb94a7ed0c6a947cad68d03d6584, server=c48e4b8eb196,44683,1733750968041 because future has completed 2024-12-09T13:33:35,442 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=160, resume processing ppid=159 2024-12-09T13:33:35,442 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, ppid=159, state=SUCCESS, hasLock=false; CloseRegionProcedure 941fdb94a7ed0c6a947cad68d03d6584, server=c48e4b8eb196,44683,1733750968041 in 161 msec 2024-12-09T13:33:35,444 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=158 2024-12-09T13:33:35,444 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=158, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=941fdb94a7ed0c6a947cad68d03d6584, UNASSIGN in 166 msec 2024-12-09T13:33:35,446 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=158, resume processing ppid=157 2024-12-09T13:33:35,446 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 170 msec 2024-12-09T13:33:35,447 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751215447"}]},"ts":"1733751215447"} 2024-12-09T13:33:35,448 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-12-09T13:33:35,448 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-12-09T13:33:35,450 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 180 msec 2024-12-09T13:33:35,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-09T13:33:35,594 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: 
default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-09T13:33:35,596 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:35,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:35,599 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:35,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:35,600 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:35,603 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:35,604 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/941fdb94a7ed0c6a947cad68d03d6584 2024-12-09T13:33:35,605 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e6d6e668b7499b15a30493da1b169dae 2024-12-09T13:33:35,605 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d2d53892ea44e719d7e3dbc6c23059b4 2024-12-09T13:33:35,606 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d2d53892ea44e719d7e3dbc6c23059b4/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d2d53892ea44e719d7e3dbc6c23059b4/recovered.edits] 2024-12-09T13:33:35,606 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e6d6e668b7499b15a30493da1b169dae/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e6d6e668b7499b15a30493da1b169dae/recovered.edits] 2024-12-09T13:33:35,606 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/941fdb94a7ed0c6a947cad68d03d6584/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/941fdb94a7ed0c6a947cad68d03d6584/recovered.edits] 2024-12-09T13:33:35,611 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e6d6e668b7499b15a30493da1b169dae/cf/2fd449deb41745589aec80d9c2c88310 to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e6d6e668b7499b15a30493da1b169dae/cf/2fd449deb41745589aec80d9c2c88310 2024-12-09T13:33:35,611 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/941fdb94a7ed0c6a947cad68d03d6584/cf/03f259ed668a4d2ea7c04171d3138b64.d2d53892ea44e719d7e3dbc6c23059b4 to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/941fdb94a7ed0c6a947cad68d03d6584/cf/03f259ed668a4d2ea7c04171d3138b64.d2d53892ea44e719d7e3dbc6c23059b4 2024-12-09T13:33:35,611 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d2d53892ea44e719d7e3dbc6c23059b4/cf/03f259ed668a4d2ea7c04171d3138b64 to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d2d53892ea44e719d7e3dbc6c23059b4/cf/03f259ed668a4d2ea7c04171d3138b64 2024-12-09T13:33:35,612 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/941fdb94a7ed0c6a947cad68d03d6584/cf/2fd449deb41745589aec80d9c2c88310.e6d6e668b7499b15a30493da1b169dae to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/941fdb94a7ed0c6a947cad68d03d6584/cf/2fd449deb41745589aec80d9c2c88310.e6d6e668b7499b15a30493da1b169dae 2024-12-09T13:33:35,614 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e6d6e668b7499b15a30493da1b169dae/recovered.edits/8.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e6d6e668b7499b15a30493da1b169dae/recovered.edits/8.seqid 2024-12-09T13:33:35,614 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d2d53892ea44e719d7e3dbc6c23059b4/recovered.edits/8.seqid to 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d2d53892ea44e719d7e3dbc6c23059b4/recovered.edits/8.seqid 2024-12-09T13:33:35,614 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e6d6e668b7499b15a30493da1b169dae 2024-12-09T13:33:35,614 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d2d53892ea44e719d7e3dbc6c23059b4 2024-12-09T13:33:35,615 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/941fdb94a7ed0c6a947cad68d03d6584/recovered.edits/12.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/941fdb94a7ed0c6a947cad68d03d6584/recovered.edits/12.seqid 2024-12-09T13:33:35,616 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/941fdb94a7ed0c6a947cad68d03d6584 2024-12-09T13:33:35,616 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-12-09T13:33:35,618 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:35,621 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-12-09T13:33:35,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:35,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:35,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:35,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:35,631 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 
2024-12-09T13:33:35,631 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-09T13:33:35,631 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-09T13:33:35,631 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-09T13:33:35,632 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-12-09T13:33:35,633 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:35,633 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-12-09T13:33:35,633 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197137.941fdb94a7ed0c6a947cad68d03d6584.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751215633"}]},"ts":"9223372036854775807"} 2024-12-09T13:33:35,635 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-12-09T13:33:35,635 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 941fdb94a7ed0c6a947cad68d03d6584, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197137.941fdb94a7ed0c6a947cad68d03d6584.', STARTKEY => '', ENDKEY => ''}] 2024-12-09T13:33:35,635 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 
2024-12-09T13:33:35,635 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733751215635"}]},"ts":"9223372036854775807"} 2024-12-09T13:33:35,636 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-12-09T13:33:35,637 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:35,638 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 41 msec 2024-12-09T13:33:35,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:35,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:35,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:35,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:35,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:35,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:35,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:35,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:35,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=161 2024-12-09T13:33:35,640 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data 
PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:35,640 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:35,640 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:35,640 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:35,640 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:35,640 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-09T13:33:35,640 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:35,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=162, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:35,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-09T13:33:35,643 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751215642"}]},"ts":"1733751215642"} 2024-12-09T13:33:35,644 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-12-09T13:33:35,644 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-12-09T13:33:35,644 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-12-09T13:33:35,645 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1bcbe3770b02c0b22c3abe057c6ed82d, UNASSIGN}, {pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ebf7c4bb66d6a538b64ca52a27de834f, UNASSIGN}] 
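At this point the DISABLE/DELETE pair for testtb-testExportFileSystemStateWithMergeRegion-1 has completed (pid=157 and pid=161), and the same pair is just starting for testtb-testExportFileSystemStateWithMergeRegion (pid=162). From the client's side, each pair is simply two Admin calls issued in order. A hypothetical helper, sketched for illustration only (the class and method names are invented; the calls themselves are the standard org.apache.hadoop.hbase.client.Admin ones):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

// Disable-then-delete, mirroring the DISABLE/DELETE sequences this log records.
final class DropTableSketch {
    static void dropTable(Admin admin, String name) throws IOException {
        TableName table = TableName.valueOf(name);
        if (!admin.isTableDisabled(table)) {
            admin.disableTable(table); // DisableTableProcedure, e.g. pid=157 and pid=162 above
        }
        admin.deleteTable(table);      // DeleteTableProcedure, e.g. pid=161 above
    }
}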
2024-12-09T13:33:35,646 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1bcbe3770b02c0b22c3abe057c6ed82d, UNASSIGN 2024-12-09T13:33:35,646 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ebf7c4bb66d6a538b64ca52a27de834f, UNASSIGN 2024-12-09T13:33:35,647 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=1bcbe3770b02c0b22c3abe057c6ed82d, regionState=CLOSING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:33:35,647 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=ebf7c4bb66d6a538b64ca52a27de834f, regionState=CLOSING, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:33:35,648 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1bcbe3770b02c0b22c3abe057c6ed82d, UNASSIGN because future has completed 2024-12-09T13:33:35,648 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:33:35,648 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1bcbe3770b02c0b22c3abe057c6ed82d, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:33:35,648 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ebf7c4bb66d6a538b64ca52a27de834f, UNASSIGN because future has completed 2024-12-09T13:33:35,649 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:33:35,649 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=167, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure ebf7c4bb66d6a538b64ca52a27de834f, server=c48e4b8eb196,44683,1733750968041}] 2024-12-09T13:33:35,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-09T13:33:35,800 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(122): Close 1bcbe3770b02c0b22c3abe057c6ed82d 2024-12-09T13:33:35,801 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:33:35,801 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1722): Closing 1bcbe3770b02c0b22c3abe057c6ed82d, disabling compactions & flushes 2024-12-09T13:33:35,801 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 
{event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d. 2024-12-09T13:33:35,801 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d. 2024-12-09T13:33:35,801 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d. after waiting 0 ms 2024-12-09T13:33:35,801 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d. 2024-12-09T13:33:35,802 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(122): Close ebf7c4bb66d6a538b64ca52a27de834f 2024-12-09T13:33:35,802 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:33:35,802 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1722): Closing ebf7c4bb66d6a538b64ca52a27de834f, disabling compactions & flushes 2024-12-09T13:33:35,802 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f. 2024-12-09T13:33:35,802 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f. 2024-12-09T13:33:35,802 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f. after waiting 0 ms 2024-12-09T13:33:35,802 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f. 
2024-12-09T13:33:35,808 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/1bcbe3770b02c0b22c3abe057c6ed82d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T13:33:35,809 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebf7c4bb66d6a538b64ca52a27de834f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T13:33:35,809 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:33:35,809 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d. 2024-12-09T13:33:35,809 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:33:35,810 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1676): Region close journal for 1bcbe3770b02c0b22c3abe057c6ed82d: Waiting for close lock at 1733751215801Running coprocessor pre-close hooks at 1733751215801Disabling compacts and flushes for region at 1733751215801Disabling writes for close at 1733751215801Writing region close event to WAL at 1733751215802 (+1 ms)Running coprocessor post-close hooks at 1733751215809 (+7 ms)Closed at 1733751215809 2024-12-09T13:33:35,810 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f. 
2024-12-09T13:33:35,810 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1676): Region close journal for ebf7c4bb66d6a538b64ca52a27de834f: Waiting for close lock at 1733751215802Running coprocessor pre-close hooks at 1733751215802Disabling compacts and flushes for region at 1733751215802Disabling writes for close at 1733751215802Writing region close event to WAL at 1733751215803 (+1 ms)Running coprocessor post-close hooks at 1733751215809 (+6 ms)Closed at 1733751215810 (+1 ms) 2024-12-09T13:33:35,812 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(157): Closed 1bcbe3770b02c0b22c3abe057c6ed82d 2024-12-09T13:33:35,812 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=1bcbe3770b02c0b22c3abe057c6ed82d, regionState=CLOSED 2024-12-09T13:33:35,813 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(157): Closed ebf7c4bb66d6a538b64ca52a27de834f 2024-12-09T13:33:35,813 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=ebf7c4bb66d6a538b64ca52a27de834f, regionState=CLOSED 2024-12-09T13:33:35,814 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=166, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1bcbe3770b02c0b22c3abe057c6ed82d, server=c48e4b8eb196,39211,1733750968235 because future has completed 2024-12-09T13:33:35,815 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=167, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure ebf7c4bb66d6a538b64ca52a27de834f, server=c48e4b8eb196,44683,1733750968041 because future has completed 2024-12-09T13:33:35,817 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=166, resume processing ppid=164 2024-12-09T13:33:35,817 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=164, state=SUCCESS, hasLock=false; CloseRegionProcedure 1bcbe3770b02c0b22c3abe057c6ed82d, server=c48e4b8eb196,39211,1733750968235 in 167 msec 2024-12-09T13:33:35,818 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=167, resume processing ppid=165 2024-12-09T13:33:35,818 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1bcbe3770b02c0b22c3abe057c6ed82d, UNASSIGN in 172 msec 2024-12-09T13:33:35,818 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=165, state=SUCCESS, hasLock=false; CloseRegionProcedure ebf7c4bb66d6a538b64ca52a27de834f, server=c48e4b8eb196,44683,1733750968041 in 167 msec 2024-12-09T13:33:35,819 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=165, resume processing ppid=163 2024-12-09T13:33:35,819 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ebf7c4bb66d6a538b64ca52a27de834f, UNASSIGN in 173 msec 2024-12-09T13:33:35,821 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=163, resume processing ppid=162 2024-12-09T13:33:35,821 INFO 
[PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 176 msec 2024-12-09T13:33:35,823 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751215823"}]},"ts":"1733751215823"} 2024-12-09T13:33:35,825 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-12-09T13:33:35,825 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-12-09T13:33:35,827 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 186 msec 2024-12-09T13:33:35,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-09T13:33:35,963 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-09T13:33:35,963 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:35,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:35,965 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:35,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:35,965 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=168, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:35,967 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:35,969 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/1bcbe3770b02c0b22c3abe057c6ed82d 2024-12-09T13:33:35,969 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebf7c4bb66d6a538b64ca52a27de834f 2024-12-09T13:33:35,970 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/1bcbe3770b02c0b22c3abe057c6ed82d/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/1bcbe3770b02c0b22c3abe057c6ed82d/recovered.edits] 2024-12-09T13:33:35,970 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebf7c4bb66d6a538b64ca52a27de834f/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebf7c4bb66d6a538b64ca52a27de834f/recovered.edits] 2024-12-09T13:33:35,974 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/1bcbe3770b02c0b22c3abe057c6ed82d/cf/a3b63c41c8194a7190ae67bd3c72b394 to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/1bcbe3770b02c0b22c3abe057c6ed82d/cf/a3b63c41c8194a7190ae67bd3c72b394 2024-12-09T13:33:35,975 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebf7c4bb66d6a538b64ca52a27de834f/cf/68ece6dc240345cbb186c6b74c806267 to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebf7c4bb66d6a538b64ca52a27de834f/cf/68ece6dc240345cbb186c6b74c806267 2024-12-09T13:33:35,977 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebf7c4bb66d6a538b64ca52a27de834f/recovered.edits/9.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebf7c4bb66d6a538b64ca52a27de834f/recovered.edits/9.seqid 2024-12-09T13:33:35,977 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/1bcbe3770b02c0b22c3abe057c6ed82d/recovered.edits/9.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/1bcbe3770b02c0b22c3abe057c6ed82d/recovered.edits/9.seqid 2024-12-09T13:33:35,977 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebf7c4bb66d6a538b64ca52a27de834f 2024-12-09T13:33:35,977 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithMergeRegion/1bcbe3770b02c0b22c3abe057c6ed82d 
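The HFileArchiver entries above show that deleting the table does not destroy its store files outright: every file under data/default/<table>/<region>/ is first moved to the same relative location under archive/, and only then is the region directory deleted. The sketch below merely restates that observed path relationship in code; it is not HBase's internal implementation, and the method name is invented for illustration.

import org.apache.hadoop.fs.Path;

final class ArchivePathSketch {
    // Observed mapping: <rootdir>/data/<ns>/<table>/<region>/...
    //               ->  <rootdir>/archive/data/<ns>/<table>/<region>/...
    static Path toArchivePath(Path rootDir, Path fileUnderData) {
        // Assumes fileUnderData really lives under rootDir, as in the entries above.
        String relative = fileUnderData.toUri().getPath()
            .substring(rootDir.toUri().getPath().length() + 1);
        return new Path(new Path(rootDir, "archive"), relative);
    }
}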
2024-12-09T13:33:35,977 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-12-09T13:33:35,979 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=168, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:35,982 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-12-09T13:33:35,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:35,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:35,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:35,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:35,998 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-09T13:33:35,998 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-09T13:33:35,998 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-09T13:33:35,998 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-09T13:33:35,999 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-12-09T13:33:36,000 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=168, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:36,000 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 
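A few entries further down, the master logs delete requests for the three snapshots created by this test (emptySnaptb0-testExportFileSystemStateWithMergeRegion, snaptb0-testExportFileSystemStateWithMergeRegion and snaptb0-testExportFileSystemStateWithMergeRegion-1), and SnapshotManager removes each one. On the client side that cleanup corresponds to Admin#deleteSnapshot. A minimal sketch, with the snapshot names copied from the log and everything else (class and method names) invented for illustration:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;

final class SnapshotCleanupSketch {
    static void deleteTestSnapshots(Admin admin) throws IOException {
        for (String name : new String[] {
                "emptySnaptb0-testExportFileSystemStateWithMergeRegion",
                "snaptb0-testExportFileSystemStateWithMergeRegion",
                "snaptb0-testExportFileSystemStateWithMergeRegion-1"}) {
            admin.deleteSnapshot(name); // one SnapshotManager "Deleting snapshot: ..." entry each
        }
    }
}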
2024-12-09T13:33:36,000 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751216000"}]},"ts":"9223372036854775807"} 2024-12-09T13:33:36,000 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751216000"}]},"ts":"9223372036854775807"} 2024-12-09T13:33:36,002 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-09T13:33:36,002 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 1bcbe3770b02c0b22c3abe057c6ed82d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733751195786.1bcbe3770b02c0b22c3abe057c6ed82d.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ebf7c4bb66d6a538b64ca52a27de834f, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733751195786.ebf7c4bb66d6a538b64ca52a27de834f.', STARTKEY => '1', ENDKEY => ''}] 2024-12-09T13:33:36,002 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 2024-12-09T13:33:36,002 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733751216002"}]},"ts":"9223372036854775807"} 2024-12-09T13:33:36,004 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-12-09T13:33:36,005 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=168, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:36,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:36,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:36,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:36,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:36,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:36,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:36,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:36,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:36,006 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 42 msec 2024-12-09T13:33:36,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-12-09T13:33:36,007 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:36,007 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-09T13:33:36,014 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-09T13:33:36,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:36,017 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-09T13:33:36,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:36,020 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-12-09T13:33:36,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:36,039 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=818 (was 808) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5867 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Client (294847031) connection to localhost/127.0.0.1:37601 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:35228 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 145582) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_159027148_1 at /127.0.0.1:35890 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (294847031) connection to localhost/127.0.0.1:33133 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37601 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:35896 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:50616 [Waiting for 
operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_159027148_1 at /127.0.0.1:35202 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=825 (was 801) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=689 (was 771), ProcessCount=17 (was 20), AvailableMemoryMB=1541 (was 1448) - AvailableMemoryMB LEAK? - 2024-12-09T13:33:36,039 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=818 is superior to 500 2024-12-09T13:33:36,055 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=818, OpenFileDescriptor=825, MaxFileDescriptor=1048576, SystemLoadAverage=689, ProcessCount=17, AvailableMemoryMB=1541 2024-12-09T13:33:36,055 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=818 is superior to 500 2024-12-09T13:33:36,056 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T13:33:36,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-09T13:33:36,058 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T13:33:36,058 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:33:36,058 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 169 2024-12-09T13:33:36,059 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T13:33:36,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-09T13:33:36,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742209_1385 (size=407) 2024-12-09T13:33:36,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33239 is added to blk_1073742209_1385 (size=407) 2024-12-09T13:33:36,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742209_1385 (size=407) 2024-12-09T13:33:36,066 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0453e2ca6eda3288a18254548a7533b3, NAME => 'testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:33:36,067 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 22b454e79ff2e5b9a8e5078c14673022, NAME => 'testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:33:36,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742210_1386 (size=68) 2024-12-09T13:33:36,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742210_1386 (size=68) 2024-12-09T13:33:36,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742211_1387 (size=68) 2024-12-09T13:33:36,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742211_1387 (size=68) 2024-12-09T13:33:36,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742210_1386 (size=68) 2024-12-09T13:33:36,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742211_1387 (size=68) 2024-12-09T13:33:36,076 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:33:36,076 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated 
testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:33:36,076 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing 0453e2ca6eda3288a18254548a7533b3, disabling compactions & flushes 2024-12-09T13:33:36,076 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 22b454e79ff2e5b9a8e5078c14673022, disabling compactions & flushes 2024-12-09T13:33:36,076 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3. 2024-12-09T13:33:36,076 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022. 2024-12-09T13:33:36,076 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3. 2024-12-09T13:33:36,076 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022. 2024-12-09T13:33:36,076 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3. after waiting 0 ms 2024-12-09T13:33:36,076 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022. after waiting 0 ms 2024-12-09T13:33:36,076 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022. 2024-12-09T13:33:36,076 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3. 2024-12-09T13:33:36,076 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022. 2024-12-09T13:33:36,076 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3. 
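
The "Potentially hanging thread" stack listings earlier in this section are plain JVM thread dumps collected by the test's ResourceChecker between test methods. As a minimal illustration only (this is not the HBase ResourceChecker implementation; the class name ThreadDumpSketch is invented for this example), a similar listing can be produced with the standard JDK API:

    import java.util.Map;

    public class ThreadDumpSketch {
        public static void main(String[] args) {
            // Walk every live thread and print its name plus stack frames,
            // similar in spirit to the "Potentially hanging thread" report above.
            Map<Thread, StackTraceElement[]> dump = Thread.getAllStackTraces();
            for (Map.Entry<Thread, StackTraceElement[]> e : dump.entrySet()) {
                System.out.println("Potentially hanging thread: " + e.getKey().getName());
                for (StackTraceElement frame : e.getValue()) {
                    System.out.println("    " + frame);
                }
            }
        }
    }
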
2024-12-09T13:33:36,076 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 22b454e79ff2e5b9a8e5078c14673022: Waiting for close lock at 1733751216076Disabling compacts and flushes for region at 1733751216076Disabling writes for close at 1733751216076Writing region close event to WAL at 1733751216076Closed at 1733751216076 2024-12-09T13:33:36,076 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0453e2ca6eda3288a18254548a7533b3: Waiting for close lock at 1733751216076Disabling compacts and flushes for region at 1733751216076Disabling writes for close at 1733751216076Writing region close event to WAL at 1733751216076Closed at 1733751216076 2024-12-09T13:33:36,077 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T13:33:36,077 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733751216077"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733751216077"}]},"ts":"1733751216077"} 2024-12-09T13:33:36,077 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733751216077"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733751216077"}]},"ts":"1733751216077"} 2024-12-09T13:33:36,079 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
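
The create request logged above at 13:33:36,056 (table 'testtb-testExportExpiredSnapshot' with REGION_REPLICATION => '1' and a single family 'cf', VERSIONS => '1', BLOCKSIZE => 65536) corresponds roughly to the following client-side Admin call. This is an illustrative sketch, not the test's actual source; the class name CreateTableSketch is invented, and the connection is assumed to pick up its configuration from hbase-site.xml on the classpath:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection();
                 Admin admin = conn.getAdmin()) {
                TableDescriptor td = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("testtb-testExportExpiredSnapshot"))
                    .setRegionReplication(1)                       // REGION_REPLICATION => '1'
                    .setColumnFamily(ColumnFamilyDescriptorBuilder
                        .newBuilder(Bytes.toBytes("cf"))
                        .setMaxVersions(1)                         // VERSIONS => '1'
                        .setBlocksize(65536)                       // BLOCKSIZE => '65536'
                        .build())
                    .build();
                // Pre-split at '1', matching the two regions ('' -> '1' and '1' -> '')
                // created by the RegionOpenAndInit pool in the log above.
                admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
            }
        }
    }

createTable blocks until the master's CreateTableProcedure completes, which matches the handler repeatedly logging "Checking to see if procedure is done pid=169" until the procedure finishes.
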
2024-12-09T13:33:36,080 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T13:33:36,080 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751216080"}]},"ts":"1733751216080"} 2024-12-09T13:33:36,081 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-09T13:33:36,082 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {c48e4b8eb196=0} racks are {/default-rack=0} 2024-12-09T13:33:36,083 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T13:33:36,083 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T13:33:36,083 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T13:33:36,083 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T13:33:36,083 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T13:33:36,083 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T13:33:36,083 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T13:33:36,083 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T13:33:36,083 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T13:33:36,083 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T13:33:36,083 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0453e2ca6eda3288a18254548a7533b3, ASSIGN}, {pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=22b454e79ff2e5b9a8e5078c14673022, ASSIGN}] 2024-12-09T13:33:36,084 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=22b454e79ff2e5b9a8e5078c14673022, ASSIGN 2024-12-09T13:33:36,084 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0453e2ca6eda3288a18254548a7533b3, ASSIGN 2024-12-09T13:33:36,085 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=22b454e79ff2e5b9a8e5078c14673022, ASSIGN; state=OFFLINE, location=c48e4b8eb196,39211,1733750968235; forceNewPlan=false, retain=false 2024-12-09T13:33:36,085 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=170, ppid=169, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0453e2ca6eda3288a18254548a7533b3, ASSIGN; state=OFFLINE, location=c48e4b8eb196,44683,1733750968041; forceNewPlan=false, retain=false 2024-12-09T13:33:36,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-09T13:33:36,236 INFO [c48e4b8eb196:45049 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-09T13:33:36,237 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=22b454e79ff2e5b9a8e5078c14673022, regionState=OPENING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:33:36,237 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=0453e2ca6eda3288a18254548a7533b3, regionState=OPENING, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:33:36,240 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=22b454e79ff2e5b9a8e5078c14673022, ASSIGN because future has completed 2024-12-09T13:33:36,241 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure 22b454e79ff2e5b9a8e5078c14673022, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:33:36,242 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0453e2ca6eda3288a18254548a7533b3, ASSIGN because future has completed 2024-12-09T13:33:36,242 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0453e2ca6eda3288a18254548a7533b3, server=c48e4b8eb196,44683,1733750968041}] 2024-12-09T13:33:36,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-09T13:33:36,396 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022. 2024-12-09T13:33:36,397 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7752): Opening region: {ENCODED => 22b454e79ff2e5b9a8e5078c14673022, NAME => 'testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T13:33:36,397 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022. 
service=AccessControlService 2024-12-09T13:33:36,397 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T13:33:36,397 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 22b454e79ff2e5b9a8e5078c14673022 2024-12-09T13:33:36,398 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:33:36,398 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7794): checking encryption for 22b454e79ff2e5b9a8e5078c14673022 2024-12-09T13:33:36,398 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7797): checking classloading for 22b454e79ff2e5b9a8e5078c14673022 2024-12-09T13:33:36,398 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3. 2024-12-09T13:33:36,398 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7752): Opening region: {ENCODED => 0453e2ca6eda3288a18254548a7533b3, NAME => 'testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T13:33:36,399 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3. service=AccessControlService 2024-12-09T13:33:36,399 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T13:33:36,399 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 0453e2ca6eda3288a18254548a7533b3 2024-12-09T13:33:36,399 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:33:36,399 INFO [StoreOpener-22b454e79ff2e5b9a8e5078c14673022-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 22b454e79ff2e5b9a8e5078c14673022 2024-12-09T13:33:36,399 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7794): checking encryption for 0453e2ca6eda3288a18254548a7533b3 2024-12-09T13:33:36,399 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7797): checking classloading for 0453e2ca6eda3288a18254548a7533b3 2024-12-09T13:33:36,400 INFO [StoreOpener-0453e2ca6eda3288a18254548a7533b3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0453e2ca6eda3288a18254548a7533b3 2024-12-09T13:33:36,400 INFO [StoreOpener-22b454e79ff2e5b9a8e5078c14673022-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 22b454e79ff2e5b9a8e5078c14673022 columnFamilyName cf 2024-12-09T13:33:36,400 DEBUG [StoreOpener-22b454e79ff2e5b9a8e5078c14673022-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:33:36,401 INFO [StoreOpener-22b454e79ff2e5b9a8e5078c14673022-1 {}] regionserver.HStore(327): Store=22b454e79ff2e5b9a8e5078c14673022/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:33:36,401 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1038): replaying wal for 22b454e79ff2e5b9a8e5078c14673022 2024-12-09T13:33:36,401 INFO [StoreOpener-0453e2ca6eda3288a18254548a7533b3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 
1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0453e2ca6eda3288a18254548a7533b3 columnFamilyName cf 2024-12-09T13:33:36,401 DEBUG [StoreOpener-0453e2ca6eda3288a18254548a7533b3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:33:36,401 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/22b454e79ff2e5b9a8e5078c14673022 2024-12-09T13:33:36,402 INFO [StoreOpener-0453e2ca6eda3288a18254548a7533b3-1 {}] regionserver.HStore(327): Store=0453e2ca6eda3288a18254548a7533b3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:33:36,402 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/22b454e79ff2e5b9a8e5078c14673022 2024-12-09T13:33:36,402 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1038): replaying wal for 0453e2ca6eda3288a18254548a7533b3 2024-12-09T13:33:36,402 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1048): stopping wal replay for 22b454e79ff2e5b9a8e5078c14673022 2024-12-09T13:33:36,402 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1060): Cleaning up temporary data for 22b454e79ff2e5b9a8e5078c14673022 2024-12-09T13:33:36,402 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/0453e2ca6eda3288a18254548a7533b3 2024-12-09T13:33:36,403 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/0453e2ca6eda3288a18254548a7533b3 2024-12-09T13:33:36,403 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1048): stopping wal replay for 0453e2ca6eda3288a18254548a7533b3 2024-12-09T13:33:36,403 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1060): Cleaning up temporary data for 0453e2ca6eda3288a18254548a7533b3 2024-12-09T13:33:36,403 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 
{event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1093): writing seq id for 22b454e79ff2e5b9a8e5078c14673022 2024-12-09T13:33:36,404 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1093): writing seq id for 0453e2ca6eda3288a18254548a7533b3 2024-12-09T13:33:36,405 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/22b454e79ff2e5b9a8e5078c14673022/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:33:36,405 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1114): Opened 22b454e79ff2e5b9a8e5078c14673022; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66041143, jitterRate=-0.015910282731056213}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:33:36,405 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 22b454e79ff2e5b9a8e5078c14673022 2024-12-09T13:33:36,406 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/0453e2ca6eda3288a18254548a7533b3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:33:36,406 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1006): Region open journal for 22b454e79ff2e5b9a8e5078c14673022: Running coprocessor pre-open hook at 1733751216398Writing region info on filesystem at 1733751216398Initializing all the Stores at 1733751216399 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751216399Cleaning up temporary data from old regions at 1733751216402 (+3 ms)Running coprocessor post-open hooks at 1733751216405 (+3 ms)Region opened successfully at 1733751216406 (+1 ms) 2024-12-09T13:33:36,406 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1114): Opened 0453e2ca6eda3288a18254548a7533b3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69169126, jitterRate=0.03070029616355896}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:33:36,406 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0453e2ca6eda3288a18254548a7533b3 2024-12-09T13:33:36,406 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1006): Region open journal for 0453e2ca6eda3288a18254548a7533b3: Running coprocessor pre-open hook at 1733751216399Writing region info on filesystem at 
1733751216399Initializing all the Stores at 1733751216400 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751216400Cleaning up temporary data from old regions at 1733751216403 (+3 ms)Running coprocessor post-open hooks at 1733751216406 (+3 ms)Region opened successfully at 1733751216406 2024-12-09T13:33:36,407 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022., pid=172, masterSystemTime=1733751216394 2024-12-09T13:33:36,407 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3., pid=173, masterSystemTime=1733751216396 2024-12-09T13:33:36,408 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3. 2024-12-09T13:33:36,408 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3. 2024-12-09T13:33:36,409 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=0453e2ca6eda3288a18254548a7533b3, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:33:36,409 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022. 2024-12-09T13:33:36,409 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022. 
2024-12-09T13:33:36,409 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=22b454e79ff2e5b9a8e5078c14673022, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:33:36,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0453e2ca6eda3288a18254548a7533b3, server=c48e4b8eb196,44683,1733750968041 because future has completed 2024-12-09T13:33:36,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure 22b454e79ff2e5b9a8e5078c14673022, server=c48e4b8eb196,39211,1733750968235 because future has completed 2024-12-09T13:33:36,412 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=170 2024-12-09T13:33:36,412 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=170, state=SUCCESS, hasLock=false; OpenRegionProcedure 0453e2ca6eda3288a18254548a7533b3, server=c48e4b8eb196,44683,1733750968041 in 169 msec 2024-12-09T13:33:36,413 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=172, resume processing ppid=171 2024-12-09T13:33:36,413 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0453e2ca6eda3288a18254548a7533b3, ASSIGN in 329 msec 2024-12-09T13:33:36,413 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, ppid=171, state=SUCCESS, hasLock=false; OpenRegionProcedure 22b454e79ff2e5b9a8e5078c14673022, server=c48e4b8eb196,39211,1733750968235 in 171 msec 2024-12-09T13:33:36,414 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=171, resume processing ppid=169 2024-12-09T13:33:36,414 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=22b454e79ff2e5b9a8e5078c14673022, ASSIGN in 330 msec 2024-12-09T13:33:36,415 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T13:33:36,415 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751216415"}]},"ts":"1733751216415"} 2024-12-09T13:33:36,417 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-09T13:33:36,418 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T13:33:36,418 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-12-09T13:33:36,421 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], 
kv [jenkins: RWXCA] 2024-12-09T13:33:36,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:36,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:36,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:36,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:36,456 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:36,456 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:36,456 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:36,456 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:36,458 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=169, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 400 msec 2024-12-09T13:33:36,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-09T13:33:36,683 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-09T13:33:36,683 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-12-09T13:33:36,684 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:33:36,688 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportExpiredSnapshot assigned to meta. Checking AM states. 
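
The "Waiting until all regions of table testtb-testExportExpiredSnapshot get assigned. Timeout = 60000ms" step around this point goes through the test utility's waiter. A simpler client-side equivalent, sketched with the public Admin API only (WaitForTableSketch is an invented class name; the 60-second deadline mirrors the timeout in the log), might look like:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class WaitForTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection();
                 Admin admin = conn.getAdmin()) {
                TableName tn = TableName.valueOf("testtb-testExportExpiredSnapshot");
                long deadline = System.currentTimeMillis() + 60_000L;
                // Poll until every region of the table is assigned and serving.
                while (!admin.isTableAvailable(tn)) {
                    if (System.currentTimeMillis() > deadline) {
                        throw new IllegalStateException("Table not available within timeout");
                    }
                    Thread.sleep(200);
                }
            }
        }
    }
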
2024-12-09T13:33:36,688 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:33:36,688 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportExpiredSnapshot assigned. 2024-12-09T13:33:36,688 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-09T13:33:36,691 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-09T13:33:36,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733751216691 (current time:1733751216691). 2024-12-09T13:33:36,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T13:33:36,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-09T13:33:36,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T13:33:36,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4aff645b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:36,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:33:36,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:33:36,693 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:33:36,693 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:33:36,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:33:36,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@158e38, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:36,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:33:36,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientMetaService, sasl=false 2024-12-09T13:33:36,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:36,695 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42652, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:33:36,695 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ea8c2cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:36,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:33:36,696 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:33:36,697 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:33:36,697 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43682, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:33:36,698 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049. 2024-12-09T13:33:36,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:33:36,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:36,699 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
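
The snapshot request logged above ({ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }) maps to a single Admin call on the client side. An illustrative sketch only (SnapshotSketch is an invented class name; connection configuration is assumed to come from the classpath):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection();
                 Admin admin = conn.getAdmin()) {
                // Take a FLUSH-type snapshot, mirroring the "type=FLUSH ttl=0"
                // request in the log; the call blocks until the snapshot completes.
                admin.snapshot("emptySnaptb0-testExportExpiredSnapshot",
                    TableName.valueOf("testtb-testExportExpiredSnapshot"),
                    SnapshotType.FLUSH);
            }
        }
    }

Before the SnapshotProcedure is stored, the master validates the request and copies the table's ACL into the snapshot description, which is what the SnapshotDescriptionUtils.isSecurityAvailable and writeAclToSnapshotDescription call stacks above reflect.
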
2024-12-09T13:33:36,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:36,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1066a682, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:36,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:33:36,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:33:36,700 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:33:36,700 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:33:36,700 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:33:36,700 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@794c7178, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:36,700 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:33:36,701 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:33:36,701 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:36,701 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42666, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:33:36,702 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66d2f377, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:36,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:33:36,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:33:36,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:33:36,704 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43694, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:33:36,705 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:33:36,706 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049. 2024-12-09T13:33:36,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:33:36,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:36,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:36,706 INFO 
[Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:33:36,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-09T13:33:36,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T13:33:36,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-09T13:33:36,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-12-09T13:33:36,709 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T13:33:36,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-09T13:33:36,710 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T13:33:36,711 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T13:33:36,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742212_1388 (size=170) 2024-12-09T13:33:36,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742212_1388 (size=170) 2024-12-09T13:33:36,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742212_1388 (size=170) 2024-12-09T13:33:36,718 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T13:33:36,718 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0453e2ca6eda3288a18254548a7533b3}, {pid=176, ppid=174, state=RUNNABLE, hasLock=false; 
SnapshotRegionProcedure 22b454e79ff2e5b9a8e5078c14673022}] 2024-12-09T13:33:36,719 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 22b454e79ff2e5b9a8e5078c14673022 2024-12-09T13:33:36,719 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0453e2ca6eda3288a18254548a7533b3 2024-12-09T13:33:36,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-09T13:33:36,872 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39211 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=176 2024-12-09T13:33:36,872 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44683 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=175 2024-12-09T13:33:36,872 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022. 2024-12-09T13:33:36,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3. 2024-12-09T13:33:36,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.HRegion(2603): Flush status journal for 22b454e79ff2e5b9a8e5078c14673022: 2024-12-09T13:33:36,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.HRegion(2603): Flush status journal for 0453e2ca6eda3288a18254548a7533b3: 2024-12-09T13:33:36,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-09T13:33:36,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-09T13:33:36,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-09T13:33:36,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-09T13:33:36,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:33:36,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:33:36,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T13:33:36,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T13:33:36,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742214_1390 (size=71) 2024-12-09T13:33:36,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742214_1390 (size=71) 2024-12-09T13:33:36,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742214_1390 (size=71) 2024-12-09T13:33:36,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022. 2024-12-09T13:33:36,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-12-09T13:33:36,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=176 2024-12-09T13:33:36,886 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 22b454e79ff2e5b9a8e5078c14673022 2024-12-09T13:33:36,886 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 22b454e79ff2e5b9a8e5078c14673022 2024-12-09T13:33:36,888 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 22b454e79ff2e5b9a8e5078c14673022 in 169 msec 2024-12-09T13:33:36,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742213_1389 (size=71) 2024-12-09T13:33:36,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742213_1389 (size=71) 2024-12-09T13:33:36,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742213_1389 (size=71) 2024-12-09T13:33:36,892 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3. 
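The procedure trace above (SNAPSHOT_PREPARE through SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, then the per-region SnapshotRegionProcedure work) is driven by a single client call. A minimal sketch, reusing the snapshot and table names from the log; configuration and error handling are assumed:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // type=FLUSH matches "ss=emptySnaptb0-testExportExpiredSnapshot ... type=FLUSH ttl=0" above;
          // the call returns once the master-side SnapshotProcedure reports completion
          // (the repeated "Checking to see if procedure is done" lines).
          admin.snapshot(new SnapshotDescription(
              "emptySnaptb0-testExportExpiredSnapshot",
              TableName.valueOf("testtb-testExportExpiredSnapshot"),
              SnapshotType.FLUSH));
        }
      }
    }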
2024-12-09T13:33:36,893 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=175 2024-12-09T13:33:36,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=175 2024-12-09T13:33:36,893 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 0453e2ca6eda3288a18254548a7533b3 2024-12-09T13:33:36,893 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0453e2ca6eda3288a18254548a7533b3 2024-12-09T13:33:36,895 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=175, resume processing ppid=174 2024-12-09T13:33:36,895 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0453e2ca6eda3288a18254548a7533b3 in 176 msec 2024-12-09T13:33:36,895 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T13:33:36,896 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T13:33:36,896 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T13:33:36,896 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-12-09T13:33:36,897 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-12-09T13:33:36,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742215_1391 (size=552) 2024-12-09T13:33:36,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742215_1391 (size=552) 2024-12-09T13:33:36,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742215_1391 (size=552) 2024-12-09T13:33:36,916 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute 
state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T13:33:36,921 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T13:33:36,921 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-12-09T13:33:36,923 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T13:33:36,923 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-12-09T13:33:36,924 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 216 msec 2024-12-09T13:33:37,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-09T13:33:37,024 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-09T13:33:37,033 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='068ee464c496fcbf90cd86a1f63a7baae', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3., hostname=c48e4b8eb196,44683,1733750968041, seqNum=2] 2024-12-09T13:33:37,034 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='11e6df074897fd7b5b8e50679e91e19c7', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:33:37,036 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='24a695d4bb611968fe92523e8f6d30d03', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:33:37,036 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='3465e0a7a4569262b653c516eedc8d005', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022., 
hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:33:37,038 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T13:33:37,039 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39211 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T13:33:37,040 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-09T13:33:37,043 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-09T13:33:37,043 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3. 2024-12-09T13:33:37,043 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:33:37,045 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-09T13:33:37,049 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-09T13:33:37,053 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-09T13:33:37,055 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-09T13:33:37,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733751217055 (current time:1733751217055). 
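The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings above come from puts issued with WAL durability turned off. A minimal sketch of such a write; the row and value bytes are placeholders, not the test's actual keys:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
          Put put = new Put(Bytes.toBytes("row-placeholder"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-placeholder"));
          // Skipping the WAL is what makes the regionserver log the data-loss warning above.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }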
2024-12-09T13:33:37,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T13:33:37,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-09T13:33:37,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T13:33:37,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4178a0aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:37,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:33:37,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:33:37,056 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:33:37,056 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:33:37,056 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:33:37,056 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4150d332, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:37,056 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:33:37,056 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:33:37,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:37,057 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42672, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:33:37,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@654c58ab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:37,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:33:37,058 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:33:37,058 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:33:37,059 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43704, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:33:37,060 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049. 2024-12-09T13:33:37,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:33:37,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:37,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:37,060 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T13:33:37,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f694c7e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:37,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:33:37,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:33:37,061 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:33:37,061 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:33:37,061 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:33:37,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@634432ca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:37,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:33:37,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:33:37,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:37,063 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42696, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:33:37,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3355bd34, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:37,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:33:37,064 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:33:37,064 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:33:37,065 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43708, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
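The ClusterIdFetcher/ConnectionRegistry exchange above ("Going to request ... for getting cluster id", "Got connection registry info: cluster_id: ...") resolves the cluster id a client needs before building RPC stubs. A sketch of reading the same id through the public Admin API, under assumed default configuration:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class PrintClusterIdSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Prints the cluster's UUID, i.e. the value the registry returned above in this run.
          System.out.println(admin.getClusterMetrics().getClusterId());
        }
      }
    }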
2024-12-09T13:33:37,066 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:33:37,067 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049. 2024-12-09T13:33:37,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:33:37,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:37,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:37,067 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
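The call stack above shows validate() → writeAclToSnapshotDescription() reading table permissions out of hbase:acl (the "Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA]" lines) so the grants can be embedded in the snapshot description. A sketch of inspecting the same grants via the public AccessControlClient helper rather than the internal PermissionStorage class; connection settings are assumed:

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class ListTableAclsSketch {
      public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          List<UserPermission> perms =
              AccessControlClient.getUserPermissions(conn, "testtb-testExportExpiredSnapshot");
          // With the AccessController coprocessor enabled, expect an entry like "jenkins ... RWXCA".
          perms.forEach(System.out::println);
        }
      }
    }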
2024-12-09T13:33:37,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-09T13:33:37,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T13:33:37,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-09T13:33:37,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-09T13:33:37,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-09T13:33:37,069 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T13:33:37,070 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T13:33:37,072 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T13:33:37,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742216_1392 (size=165) 2024-12-09T13:33:37,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742216_1392 (size=165) 2024-12-09T13:33:37,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742216_1392 (size=165) 2024-12-09T13:33:37,079 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T13:33:37,079 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0453e2ca6eda3288a18254548a7533b3}, {pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 22b454e79ff2e5b9a8e5078c14673022}] 2024-12-09T13:33:37,080 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 22b454e79ff2e5b9a8e5078c14673022 2024-12-09T13:33:37,080 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0453e2ca6eda3288a18254548a7533b3 2024-12-09T13:33:37,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-09T13:33:37,233 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44683 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=178 2024-12-09T13:33:37,233 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39211 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=179 2024-12-09T13:33:37,233 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3. 2024-12-09T13:33:37,233 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022. 2024-12-09T13:33:37,234 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2902): Flushing 22b454e79ff2e5b9a8e5078c14673022 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-09T13:33:37,234 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2902): Flushing 0453e2ca6eda3288a18254548a7533b3 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-09T13:33:37,251 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/0453e2ca6eda3288a18254548a7533b3/.tmp/cf/a0a871389813419d9b40e433b4b17bd2 is 71, key is 034095bee30b047c380f582ae5d9f152/cf:q/1733751217038/Put/seqid=0 2024-12-09T13:33:37,251 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/22b454e79ff2e5b9a8e5078c14673022/.tmp/cf/c06d81d0e1714720b20f4420ca1c341b is 71, key is 168d26658de6c679f58a26ad7bd67125/cf:q/1733751217039/Put/seqid=0 2024-12-09T13:33:37,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742217_1393 (size=8394) 2024-12-09T13:33:37,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742217_1393 (size=8394) 2024-12-09T13:33:37,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742218_1394 (size=5216) 2024-12-09T13:33:37,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742217_1393 (size=8394) 2024-12-09T13:33:37,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742218_1394 (size=5216) 2024-12-09T13:33:37,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742218_1394 (size=5216) 2024-12-09T13:33:37,257 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/22b454e79ff2e5b9a8e5078c14673022/.tmp/cf/c06d81d0e1714720b20f4420ca1c341b 2024-12-09T13:33:37,257 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/0453e2ca6eda3288a18254548a7533b3/.tmp/cf/a0a871389813419d9b40e433b4b17bd2 2024-12-09T13:33:37,261 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/22b454e79ff2e5b9a8e5078c14673022/.tmp/cf/c06d81d0e1714720b20f4420ca1c341b as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/22b454e79ff2e5b9a8e5078c14673022/cf/c06d81d0e1714720b20f4420ca1c341b 2024-12-09T13:33:37,261 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/0453e2ca6eda3288a18254548a7533b3/.tmp/cf/a0a871389813419d9b40e433b4b17bd2 as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/0453e2ca6eda3288a18254548a7533b3/cf/a0a871389813419d9b40e433b4b17bd2 2024-12-09T13:33:37,265 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/22b454e79ff2e5b9a8e5078c14673022/cf/c06d81d0e1714720b20f4420ca1c341b, entries=48, sequenceid=6, filesize=8.2 K 2024-12-09T13:33:37,266 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/0453e2ca6eda3288a18254548a7533b3/cf/a0a871389813419d9b40e433b4b17bd2, entries=2, sequenceid=6, filesize=5.1 K 2024-12-09T13:33:37,266 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 
22b454e79ff2e5b9a8e5078c14673022 in 32ms, sequenceid=6, compaction requested=false 2024-12-09T13:33:37,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-09T13:33:37,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2603): Flush status journal for 22b454e79ff2e5b9a8e5078c14673022: 2024-12-09T13:33:37,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022. for snaptb0-testExportExpiredSnapshot completed. 2024-12-09T13:33:37,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-09T13:33:37,267 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:33:37,267 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 0453e2ca6eda3288a18254548a7533b3 in 33ms, sequenceid=6, compaction requested=false 2024-12-09T13:33:37,267 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/22b454e79ff2e5b9a8e5078c14673022/cf/c06d81d0e1714720b20f4420ca1c341b] hfiles 2024-12-09T13:33:37,267 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/22b454e79ff2e5b9a8e5078c14673022/cf/c06d81d0e1714720b20f4420ca1c341b for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-09T13:33:37,267 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2603): Flush status journal for 0453e2ca6eda3288a18254548a7533b3: 2024-12-09T13:33:37,267 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3. for snaptb0-testExportExpiredSnapshot completed. 2024-12-09T13:33:37,267 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-09T13:33:37,267 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:33:37,267 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/0453e2ca6eda3288a18254548a7533b3/cf/a0a871389813419d9b40e433b4b17bd2] hfiles 2024-12-09T13:33:37,267 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/0453e2ca6eda3288a18254548a7533b3/cf/a0a871389813419d9b40e433b4b17bd2 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-09T13:33:37,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742219_1395 (size=110) 2024-12-09T13:33:37,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742220_1396 (size=110) 2024-12-09T13:33:37,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742219_1395 (size=110) 2024-12-09T13:33:37,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742220_1396 (size=110) 2024-12-09T13:33:37,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742219_1395 (size=110) 2024-12-09T13:33:37,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742220_1396 (size=110) 2024-12-09T13:33:37,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022. 2024-12-09T13:33:37,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3. 
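At this point snaptb0-testExportExpiredSnapshot has its flushed hfiles referenced in the per-region manifests written above, which is the state the export step of this test operates on. A sketch of exporting such a snapshot with the stock ExportSnapshot tool, using its documented -snapshot/-copy-to flags; the destination URI is a placeholder, not taken from this run:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Runs the MapReduce-backed export of the snapshot's manifest and referenced hfiles.
        int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportExpiredSnapshot",
            "-copy-to", "hdfs://backup-cluster:8020/hbase" // placeholder destination
        });
        System.exit(rc);
      }
    }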
2024-12-09T13:33:37,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=179 2024-12-09T13:33:37,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-12-09T13:33:37,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=179 2024-12-09T13:33:37,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=178 2024-12-09T13:33:37,286 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 22b454e79ff2e5b9a8e5078c14673022 2024-12-09T13:33:37,286 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 0453e2ca6eda3288a18254548a7533b3 2024-12-09T13:33:37,286 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 22b454e79ff2e5b9a8e5078c14673022 2024-12-09T13:33:37,286 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0453e2ca6eda3288a18254548a7533b3 2024-12-09T13:33:37,288 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 22b454e79ff2e5b9a8e5078c14673022 in 208 msec 2024-12-09T13:33:37,289 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=178, resume processing ppid=177 2024-12-09T13:33:37,289 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T13:33:37,289 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0453e2ca6eda3288a18254548a7533b3 in 208 msec 2024-12-09T13:33:37,289 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T13:33:37,290 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T13:33:37,290 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-12-09T13:33:37,290 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-12-09T13:33:37,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742221_1397 (size=630) 2024-12-09T13:33:37,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742221_1397 (size=630) 2024-12-09T13:33:37,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742221_1397 (size=630) 2024-12-09T13:33:37,305 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T13:33:37,309 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T13:33:37,310 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-12-09T13:33:37,311 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T13:33:37,311 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-09T13:33:37,312 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 243 msec 2024-12-09T13:33:37,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-09T13:33:37,383 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-09T13:33:37,385 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T13:33:37,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-12-09T13:33:37,386 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T13:33:37,386 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:33:37,386 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 180 2024-12-09T13:33:37,387 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T13:33:37,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-09T13:33:37,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742222_1398 (size=400) 2024-12-09T13:33:37,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742222_1398 (size=400) 2024-12-09T13:33:37,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742222_1398 (size=400) 2024-12-09T13:33:37,395 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b965e8c57383b84f23fb09c5323ac145, NAME => 'testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:33:37,395 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => f0043ab8119e670f2bd7cf64ad44aea8, NAME => 'testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 
B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:33:37,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742224_1400 (size=61) 2024-12-09T13:33:37,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742224_1400 (size=61) 2024-12-09T13:33:37,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742224_1400 (size=61) 2024-12-09T13:33:37,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742223_1399 (size=61) 2024-12-09T13:33:37,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742223_1399 (size=61) 2024-12-09T13:33:37,405 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:33:37,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742223_1399 (size=61) 2024-12-09T13:33:37,406 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing b965e8c57383b84f23fb09c5323ac145, disabling compactions & flushes 2024-12-09T13:33:37,406 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145. 2024-12-09T13:33:37,406 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145. 2024-12-09T13:33:37,406 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145. after waiting 0 ms 2024-12-09T13:33:37,406 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145. 2024-12-09T13:33:37,406 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145. 
2024-12-09T13:33:37,406 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for b965e8c57383b84f23fb09c5323ac145: Waiting for close lock at 1733751217406Disabling compacts and flushes for region at 1733751217406Disabling writes for close at 1733751217406Writing region close event to WAL at 1733751217406Closed at 1733751217406 2024-12-09T13:33:37,406 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:33:37,406 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing f0043ab8119e670f2bd7cf64ad44aea8, disabling compactions & flushes 2024-12-09T13:33:37,406 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8. 2024-12-09T13:33:37,406 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8. 2024-12-09T13:33:37,406 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8. after waiting 0 ms 2024-12-09T13:33:37,406 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8. 2024-12-09T13:33:37,406 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8. 2024-12-09T13:33:37,406 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for f0043ab8119e670f2bd7cf64ad44aea8: Waiting for close lock at 1733751217406Disabling compacts and flushes for region at 1733751217406Disabling writes for close at 1733751217406Writing region close event to WAL at 1733751217406Closed at 1733751217406 2024-12-09T13:33:37,407 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T13:33:37,407 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733751217407"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733751217407"}]},"ts":"1733751217407"} 2024-12-09T13:33:37,407 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733751217407"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733751217407"}]},"ts":"1733751217407"} 2024-12-09T13:33:37,409 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-12-09T13:33:37,410 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T13:33:37,410 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751217410"}]},"ts":"1733751217410"} 2024-12-09T13:33:37,411 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-09T13:33:37,412 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {c48e4b8eb196=0} racks are {/default-rack=0} 2024-12-09T13:33:37,413 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T13:33:37,413 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T13:33:37,413 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T13:33:37,413 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T13:33:37,413 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T13:33:37,413 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T13:33:37,413 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T13:33:37,413 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T13:33:37,413 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T13:33:37,413 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T13:33:37,413 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=b965e8c57383b84f23fb09c5323ac145, ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=f0043ab8119e670f2bd7cf64ad44aea8, ASSIGN}] 2024-12-09T13:33:37,414 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=f0043ab8119e670f2bd7cf64ad44aea8, ASSIGN 2024-12-09T13:33:37,414 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=b965e8c57383b84f23fb09c5323ac145, ASSIGN 2024-12-09T13:33:37,415 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=f0043ab8119e670f2bd7cf64ad44aea8, ASSIGN; state=OFFLINE, location=c48e4b8eb196,39211,1733750968235; forceNewPlan=false, retain=false 2024-12-09T13:33:37,415 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=b965e8c57383b84f23fb09c5323ac145, ASSIGN; state=OFFLINE, location=c48e4b8eb196,36493,1733750968163; forceNewPlan=false, retain=false 2024-12-09T13:33:37,484 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-09T13:33:37,484 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-09T13:33:37,486 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-09T13:33:37,487 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-09T13:33:37,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-09T13:33:37,565 INFO [c48e4b8eb196:45049 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-09T13:33:37,566 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=f0043ab8119e670f2bd7cf64ad44aea8, regionState=OPENING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:33:37,566 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=b965e8c57383b84f23fb09c5323ac145, regionState=OPENING, regionLocation=c48e4b8eb196,36493,1733750968163 2024-12-09T13:33:37,568 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=f0043ab8119e670f2bd7cf64ad44aea8, ASSIGN because future has completed 2024-12-09T13:33:37,569 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=183, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure f0043ab8119e670f2bd7cf64ad44aea8, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:33:37,570 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=b965e8c57383b84f23fb09c5323ac145, ASSIGN because future has completed 2024-12-09T13:33:37,571 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure b965e8c57383b84f23fb09c5323ac145, server=c48e4b8eb196,36493,1733750968163}] 2024-12-09T13:33:37,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-09T13:33:37,730 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8. 
2024-12-09T13:33:37,730 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7752): Opening region: {ENCODED => f0043ab8119e670f2bd7cf64ad44aea8, NAME => 'testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T13:33:37,731 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145. 2024-12-09T13:33:37,731 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8. service=AccessControlService 2024-12-09T13:33:37,731 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7752): Opening region: {ENCODED => b965e8c57383b84f23fb09c5323ac145, NAME => 'testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T13:33:37,732 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T13:33:37,732 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145. service=AccessControlService 2024-12-09T13:33:37,732 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot f0043ab8119e670f2bd7cf64ad44aea8 2024-12-09T13:33:37,732 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T13:33:37,732 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:33:37,733 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7794): checking encryption for f0043ab8119e670f2bd7cf64ad44aea8 2024-12-09T13:33:37,733 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot b965e8c57383b84f23fb09c5323ac145 2024-12-09T13:33:37,733 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7797): checking classloading for f0043ab8119e670f2bd7cf64ad44aea8 2024-12-09T13:33:37,733 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:33:37,733 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7794): checking encryption for b965e8c57383b84f23fb09c5323ac145 2024-12-09T13:33:37,733 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7797): checking classloading for b965e8c57383b84f23fb09c5323ac145 2024-12-09T13:33:37,735 INFO [StoreOpener-f0043ab8119e670f2bd7cf64ad44aea8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f0043ab8119e670f2bd7cf64ad44aea8 2024-12-09T13:33:37,735 INFO [StoreOpener-b965e8c57383b84f23fb09c5323ac145-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b965e8c57383b84f23fb09c5323ac145 2024-12-09T13:33:37,736 INFO [StoreOpener-f0043ab8119e670f2bd7cf64ad44aea8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f0043ab8119e670f2bd7cf64ad44aea8 columnFamilyName cf 2024-12-09T13:33:37,736 INFO [StoreOpener-b965e8c57383b84f23fb09c5323ac145-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b965e8c57383b84f23fb09c5323ac145 columnFamilyName cf 2024-12-09T13:33:37,736 DEBUG [StoreOpener-b965e8c57383b84f23fb09c5323ac145-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:33:37,736 DEBUG [StoreOpener-f0043ab8119e670f2bd7cf64ad44aea8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:33:37,736 INFO [StoreOpener-b965e8c57383b84f23fb09c5323ac145-1 {}] regionserver.HStore(327): Store=b965e8c57383b84f23fb09c5323ac145/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:33:37,736 INFO [StoreOpener-f0043ab8119e670f2bd7cf64ad44aea8-1 {}] regionserver.HStore(327): Store=f0043ab8119e670f2bd7cf64ad44aea8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:33:37,736 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1038): replaying wal for f0043ab8119e670f2bd7cf64ad44aea8 2024-12-09T13:33:37,736 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1038): replaying wal for b965e8c57383b84f23fb09c5323ac145 2024-12-09T13:33:37,736 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportExpiredSnapshot/b965e8c57383b84f23fb09c5323ac145 2024-12-09T13:33:37,736 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportExpiredSnapshot/f0043ab8119e670f2bd7cf64ad44aea8 2024-12-09T13:33:37,737 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportExpiredSnapshot/b965e8c57383b84f23fb09c5323ac145 2024-12-09T13:33:37,737 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportExpiredSnapshot/f0043ab8119e670f2bd7cf64ad44aea8 2024-12-09T13:33:37,737 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1048): stopping wal replay for 
b965e8c57383b84f23fb09c5323ac145 2024-12-09T13:33:37,737 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1060): Cleaning up temporary data for b965e8c57383b84f23fb09c5323ac145 2024-12-09T13:33:37,737 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1048): stopping wal replay for f0043ab8119e670f2bd7cf64ad44aea8 2024-12-09T13:33:37,737 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1060): Cleaning up temporary data for f0043ab8119e670f2bd7cf64ad44aea8 2024-12-09T13:33:37,738 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1093): writing seq id for b965e8c57383b84f23fb09c5323ac145 2024-12-09T13:33:37,738 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1093): writing seq id for f0043ab8119e670f2bd7cf64ad44aea8 2024-12-09T13:33:37,740 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportExpiredSnapshot/b965e8c57383b84f23fb09c5323ac145/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:33:37,740 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportExpiredSnapshot/f0043ab8119e670f2bd7cf64ad44aea8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:33:37,740 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1114): Opened b965e8c57383b84f23fb09c5323ac145; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68953753, jitterRate=0.02749098837375641}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:33:37,740 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1114): Opened f0043ab8119e670f2bd7cf64ad44aea8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71272188, jitterRate=0.0620383620262146}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:33:37,740 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f0043ab8119e670f2bd7cf64ad44aea8 2024-12-09T13:33:37,740 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b965e8c57383b84f23fb09c5323ac145 2024-12-09T13:33:37,741 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1006): Region open journal for b965e8c57383b84f23fb09c5323ac145: Running coprocessor pre-open hook at 1733751217733Writing region info on filesystem at 1733751217733Initializing all the Stores at 1733751217734 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751217734Cleaning up temporary data from old regions at 1733751217737 (+3 ms)Running coprocessor post-open hooks at 1733751217740 (+3 ms)Region opened successfully at 1733751217741 (+1 ms) 2024-12-09T13:33:37,741 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1006): Region open journal for f0043ab8119e670f2bd7cf64ad44aea8: Running coprocessor pre-open hook at 1733751217733Writing region info on filesystem at 1733751217733Initializing all the Stores at 1733751217734 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751217734Cleaning up temporary data from old regions at 1733751217737 (+3 ms)Running coprocessor post-open hooks at 1733751217740 (+3 ms)Region opened successfully at 1733751217741 (+1 ms) 2024-12-09T13:33:37,741 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8., pid=183, masterSystemTime=1733751217722 2024-12-09T13:33:37,743 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145., pid=184, masterSystemTime=1733751217724 2024-12-09T13:33:37,743 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8. 2024-12-09T13:33:37,743 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8. 2024-12-09T13:33:37,743 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=f0043ab8119e670f2bd7cf64ad44aea8, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:33:37,744 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145. 2024-12-09T13:33:37,744 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145. 
2024-12-09T13:33:37,744 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=b965e8c57383b84f23fb09c5323ac145, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,36493,1733750968163 2024-12-09T13:33:37,745 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=183, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure f0043ab8119e670f2bd7cf64ad44aea8, server=c48e4b8eb196,39211,1733750968235 because future has completed 2024-12-09T13:33:37,746 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=184, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure b965e8c57383b84f23fb09c5323ac145, server=c48e4b8eb196,36493,1733750968163 because future has completed 2024-12-09T13:33:37,747 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=183, resume processing ppid=182 2024-12-09T13:33:37,748 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, ppid=182, state=SUCCESS, hasLock=false; OpenRegionProcedure f0043ab8119e670f2bd7cf64ad44aea8, server=c48e4b8eb196,39211,1733750968235 in 176 msec 2024-12-09T13:33:37,749 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=f0043ab8119e670f2bd7cf64ad44aea8, ASSIGN in 335 msec 2024-12-09T13:33:37,749 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=184, resume processing ppid=181 2024-12-09T13:33:37,749 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=181, state=SUCCESS, hasLock=false; OpenRegionProcedure b965e8c57383b84f23fb09c5323ac145, server=c48e4b8eb196,36493,1733750968163 in 177 msec 2024-12-09T13:33:37,750 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=181, resume processing ppid=180 2024-12-09T13:33:37,751 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=b965e8c57383b84f23fb09c5323ac145, ASSIGN in 336 msec 2024-12-09T13:33:37,751 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T13:33:37,751 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751217751"}]},"ts":"1733751217751"} 2024-12-09T13:33:37,753 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-09T13:33:37,753 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T13:33:37,754 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-12-09T13:33:37,756 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-09T13:33:37,797 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:37,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:37,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:37,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:37,828 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:37,828 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:37,828 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:37,828 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:37,829 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:37,829 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:37,829 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:37,829 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:37,830 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 443 msec 2024-12-09T13:33:38,013 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-09T13:33:38,014 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-12-09T13:33:38,014 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-12-09T13:33:38,014 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:33:38,022 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-09T13:33:38,023 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:33:38,023 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportExpiredSnapshot assigned. 2024-12-09T13:33:38,023 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-09T13:33:38,032 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='0a4dc7e2401aceabe71342c7829cb3c9e', locateType=CURRENT is [region=testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145., hostname=c48e4b8eb196,36493,1733750968163, seqNum=2] 2024-12-09T13:33:38,033 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='17ec430f35eeb93c45f7169fe3f1c9b18', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:33:38,033 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='28218d3eda9f60dfe40e565f03df66c72', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:33:38,034 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='3997d2c391fa228d430270046fb209e5c', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:33:38,039 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36493 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T13:33:38,041 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39211 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8. with WAL disabled. Data may be lost in the event of a crash. 
2024-12-09T13:33:38,042 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-09T13:33:38,044 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-12-09T13:33:38,044 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145. 2024-12-09T13:33:38,045 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:33:38,047 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-09T13:33:38,051 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-09T13:33:38,056 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-09T13:33:38,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-09T13:33:38,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T13:33:38,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6eb7a101, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:38,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:33:38,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:33:38,057 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:33:38,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:33:38,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:33:38,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cbef7ea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:38,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): 
Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:33:38,058 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:33:38,058 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:38,058 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42720, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:33:38,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e36fdce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:38,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:33:38,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:33:38,060 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:33:38,060 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43714, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:33:38,061 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049. 
2024-12-09T13:33:38,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:33:38,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:38,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:38,061 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:33:38,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a99e481, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:38,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:33:38,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:33:38,062 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:33:38,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:33:38,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:33:38,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c54d932, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:38,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:33:38,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:33:38,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:38,063 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42742, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:33:38,064 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21c6fb4a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:38,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:33:38,065 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:33:38,065 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:33:38,066 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43726, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:33:38,067 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:33:38,068 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049. 
2024-12-09T13:33:38,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:33:38,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:38,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:38,068 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:33:38,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-09T13:33:38,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-09T13:33:38,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-09T13:33:38,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-12-09T13:33:38,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-09T13:33:38,070 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-12-09T13:33:38,071 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T13:33:38,072 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T13:33:38,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742225_1401 (size=152) 2024-12-09T13:33:38,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742225_1401 (size=152) 2024-12-09T13:33:38,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742225_1401 (size=152) 2024-12-09T13:33:38,079 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T13:33:38,079 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b965e8c57383b84f23fb09c5323ac145}, {pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f0043ab8119e670f2bd7cf64ad44aea8}] 2024-12-09T13:33:38,080 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f0043ab8119e670f2bd7cf64ad44aea8 2024-12-09T13:33:38,080 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b965e8c57383b84f23fb09c5323ac145 2024-12-09T13:33:38,173 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-09T13:33:38,233 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39211 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-12-09T13:33:38,233 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36493 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-12-09T13:33:38,233 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8. 2024-12-09T13:33:38,234 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145. 2024-12-09T13:33:38,234 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2902): Flushing b965e8c57383b84f23fb09c5323ac145 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-09T13:33:38,234 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2902): Flushing f0043ab8119e670f2bd7cf64ad44aea8 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-09T13:33:38,254 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportExpiredSnapshot/b965e8c57383b84f23fb09c5323ac145/.tmp/cf/a2d703bca63f40c09faf6c40e5b403f2 is 71, key is 014cb8a29819731944e000b32d1b4564/cf:q/1733751218039/Put/seqid=0 2024-12-09T13:33:38,254 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportExpiredSnapshot/f0043ab8119e670f2bd7cf64ad44aea8/.tmp/cf/ea5667c54511426890b7dc93b3402f88 is 71, key is 13f46062d361bd2bec00916157da0013/cf:q/1733751218041/Put/seqid=0 2024-12-09T13:33:38,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742227_1403 (size=8188) 2024-12-09T13:33:38,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742226_1402 (size=5422) 2024-12-09T13:33:38,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742227_1403 (size=8188) 2024-12-09T13:33:38,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742226_1402 (size=5422) 2024-12-09T13:33:38,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742227_1403 (size=8188) 2024-12-09T13:33:38,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42277 is added to blk_1073742226_1402 (size=5422) 2024-12-09T13:33:38,260 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportExpiredSnapshot/f0043ab8119e670f2bd7cf64ad44aea8/.tmp/cf/ea5667c54511426890b7dc93b3402f88 2024-12-09T13:33:38,260 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportExpiredSnapshot/b965e8c57383b84f23fb09c5323ac145/.tmp/cf/a2d703bca63f40c09faf6c40e5b403f2 2024-12-09T13:33:38,263 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportExpiredSnapshot/b965e8c57383b84f23fb09c5323ac145/.tmp/cf/a2d703bca63f40c09faf6c40e5b403f2 as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportExpiredSnapshot/b965e8c57383b84f23fb09c5323ac145/cf/a2d703bca63f40c09faf6c40e5b403f2 2024-12-09T13:33:38,263 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportExpiredSnapshot/f0043ab8119e670f2bd7cf64ad44aea8/.tmp/cf/ea5667c54511426890b7dc93b3402f88 as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportExpiredSnapshot/f0043ab8119e670f2bd7cf64ad44aea8/cf/ea5667c54511426890b7dc93b3402f88 2024-12-09T13:33:38,267 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportExpiredSnapshot/f0043ab8119e670f2bd7cf64ad44aea8/cf/ea5667c54511426890b7dc93b3402f88, entries=45, sequenceid=5, filesize=8.0 K 2024-12-09T13:33:38,267 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportExpiredSnapshot/b965e8c57383b84f23fb09c5323ac145/cf/a2d703bca63f40c09faf6c40e5b403f2, entries=5, sequenceid=5, filesize=5.3 K 2024-12-09T13:33:38,268 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for b965e8c57383b84f23fb09c5323ac145 in 33ms, sequenceid=5, compaction requested=false 2024-12-09T13:33:38,268 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for f0043ab8119e670f2bd7cf64ad44aea8 in 33ms, sequenceid=5, compaction requested=false 
2024-12-09T13:33:38,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-09T13:33:38,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-09T13:33:38,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2603): Flush status journal for f0043ab8119e670f2bd7cf64ad44aea8: 2024-12-09T13:33:38,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2603): Flush status journal for b965e8c57383b84f23fb09c5323ac145: 2024-12-09T13:33:38,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8. for snapshot-testExportExpiredSnapshot completed. 2024-12-09T13:33:38,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145. for snapshot-testExportExpiredSnapshot completed. 2024-12-09T13:33:38,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-09T13:33:38,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-09T13:33:38,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:33:38,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:33:38,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportExpiredSnapshot/f0043ab8119e670f2bd7cf64ad44aea8/cf/ea5667c54511426890b7dc93b3402f88] hfiles 2024-12-09T13:33:38,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportExpiredSnapshot/b965e8c57383b84f23fb09c5323ac145/cf/a2d703bca63f40c09faf6c40e5b403f2] hfiles 2024-12-09T13:33:38,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportExpiredSnapshot/b965e8c57383b84f23fb09c5323ac145/cf/a2d703bca63f40c09faf6c40e5b403f2 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-09T13:33:38,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportExpiredSnapshot/f0043ab8119e670f2bd7cf64ad44aea8/cf/ea5667c54511426890b7dc93b3402f88 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-09T13:33:38,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742229_1405 (size=103) 2024-12-09T13:33:38,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742229_1405 (size=103) 2024-12-09T13:33:38,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742228_1404 (size=103) 2024-12-09T13:33:38,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742229_1405 (size=103) 2024-12-09T13:33:38,280 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8. 
2024-12-09T13:33:38,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742228_1404 (size=103) 2024-12-09T13:33:38,280 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-12-09T13:33:38,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742228_1404 (size=103) 2024-12-09T13:33:38,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=187 2024-12-09T13:33:38,281 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region f0043ab8119e670f2bd7cf64ad44aea8 2024-12-09T13:33:38,281 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f0043ab8119e670f2bd7cf64ad44aea8 2024-12-09T13:33:38,282 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f0043ab8119e670f2bd7cf64ad44aea8 in 203 msec 2024-12-09T13:33:38,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-09T13:33:38,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145. 
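Note: both regions have now been flushed and their HFiles referenced in the snapshot manifest; the remaining entries finish the region subprocedures (pid=186, pid=187) and move pid=185 through manifest consolidation and verification. Once the client-visible SNAPSHOT operation completes, the snapshot can be confirmed from any client by listing snapshots. A small hypothetical sketch (class name invented, 'admin' obtained as in the earlier sketch):

    import java.util.List;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListSnapshotsSketch {
      public static void printSnapshots(Admin admin) throws Exception {
        List<SnapshotDescription> snapshots = admin.listSnapshots();
        for (SnapshotDescription sd : snapshots) {
          // Expect an entry named "snapshot-testExportExpiredSnapshot" for table
          // testExportExpiredSnapshot once pid=185 reaches SNAPSHOT_COMPLETE_SNAPSHOT.
          System.out.println(sd.getName() + " -> " + sd.getTableName());
        }
      }
    }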
2024-12-09T13:33:38,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-09T13:33:38,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=186 2024-12-09T13:33:38,684 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region b965e8c57383b84f23fb09c5323ac145 2024-12-09T13:33:38,684 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b965e8c57383b84f23fb09c5323ac145 2024-12-09T13:33:38,690 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=186, resume processing ppid=185 2024-12-09T13:33:38,690 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b965e8c57383b84f23fb09c5323ac145 in 607 msec 2024-12-09T13:33:38,690 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T13:33:38,691 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T13:33:38,693 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T13:33:38,693 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-12-09T13:33:38,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-09T13:33:38,694 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-09T13:33:38,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742230_1406 (size=609) 2024-12-09T13:33:38,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742230_1406 (size=609) 2024-12-09T13:33:38,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742230_1406 (size=609) 2024-12-09T13:33:38,707 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ 
ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T13:33:38,713 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T13:33:38,714 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-12-09T13:33:38,715 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T13:33:38,716 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-12-09T13:33:38,717 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 647 msec 2024-12-09T13:33:39,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-09T13:33:39,204 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-12-09T13:33:39,396 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0007_000001 (auth:SIMPLE) from 127.0.0.1:53150 2024-12-09T13:33:39,404 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_0/usercache/jenkins/appcache/application_1733750974572_0007/container_1733750974572_0007_01_000001/launch_container.sh] 2024-12-09T13:33:39,404 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_0/usercache/jenkins/appcache/application_1733750974572_0007/container_1733750974572_0007_01_000001/container_tokens] 2024-12-09T13:33:39,404 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_0/usercache/jenkins/appcache/application_1733750974572_0007/container_1733750974572_0007_01_000001/sysfs]
2024-12-09T13:33:40,962 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-09T13:33:47,484 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot
2024-12-09T13:33:47,484 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer
2024-12-09T13:33:49,219 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751229219
2024-12-09T13:33:49,220 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:45819, tgtDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751229219, rawTgtDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751229219, srcFsUri=hdfs://localhost:45819, srcDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7
2024-12-09T13:33:49,248 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:45819, inputRoot=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7
2024-12-09T13:33:49,248 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751229219, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751229219/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot
2024-12-09T13:33:49,250 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity.
2024-12-09T13:33:49,251 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool
org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired.
  at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:960) ~[classes/:?]
  at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1105) ~[classes/:?]
  at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?]
  at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:362) ~[test-classes/:?]
  at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
  at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
  at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
  at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
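Note: the SnapshotTTLExpiredException above is the expected outcome of testExportExpiredSnapshot. The snapshot was registered with ttl=10 (seconds) at 13:33:38 and the export is attempted at 13:33:49, so the verification step in ExportSnapshot.verifySnapshot rejects it before any copy work starts. As the trace shows, the tool is driven through Hadoop's ToolRunner; the sketch below is an illustrative, roughly equivalent invocation, not the test's exact arguments (the class name and the -copy-to target are placeholders of my own).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class RunExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Command-line equivalent:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot snapshot-testExportExpiredSnapshot -copy-to <target hdfs dir>
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snapshot-testExportExpiredSnapshot",
            "-copy-to", "hdfs://localhost:45819/some/export/target"  // placeholder target
        });
        // For an expired snapshot the TTL check fails (creation time plus TTL is
        // already in the past), the tool logs "Error running command-line tool"
        // as seen above, and rc is non-zero.
        System.exit(rc);
      }
    }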
2024-12-09T13:33:49,252 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-12-09T13:33:49,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=188, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-09T13:33:49,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-09T13:33:49,255 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751229254"}]},"ts":"1733751229254"} 2024-12-09T13:33:49,256 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-12-09T13:33:49,256 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-12-09T13:33:49,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-12-09T13:33:49,258 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0453e2ca6eda3288a18254548a7533b3, UNASSIGN}, {pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=22b454e79ff2e5b9a8e5078c14673022, UNASSIGN}] 2024-12-09T13:33:49,258 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=22b454e79ff2e5b9a8e5078c14673022, UNASSIGN 2024-12-09T13:33:49,258 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0453e2ca6eda3288a18254548a7533b3, UNASSIGN 2024-12-09T13:33:49,259 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=22b454e79ff2e5b9a8e5078c14673022, regionState=CLOSING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:33:49,259 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=0453e2ca6eda3288a18254548a7533b3, regionState=CLOSING, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:33:49,260 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0453e2ca6eda3288a18254548a7533b3, UNASSIGN because future has completed 2024-12-09T13:33:49,261 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:33:49,261 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=192, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0453e2ca6eda3288a18254548a7533b3, server=c48e4b8eb196,44683,1733750968041}] 2024-12-09T13:33:49,261 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=22b454e79ff2e5b9a8e5078c14673022, UNASSIGN because future has completed 2024-12-09T13:33:49,261 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:33:49,261 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=193, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure 22b454e79ff2e5b9a8e5078c14673022, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:33:49,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-09T13:33:49,413 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(122): Close 0453e2ca6eda3288a18254548a7533b3 2024-12-09T13:33:49,413 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:33:49,413 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1722): Closing 0453e2ca6eda3288a18254548a7533b3, disabling compactions & flushes 2024-12-09T13:33:49,413 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3. 2024-12-09T13:33:49,413 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3. 2024-12-09T13:33:49,413 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3. after waiting 0 ms 2024-12-09T13:33:49,413 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3. 
2024-12-09T13:33:49,414 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(122): Close 22b454e79ff2e5b9a8e5078c14673022 2024-12-09T13:33:49,414 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:33:49,414 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1722): Closing 22b454e79ff2e5b9a8e5078c14673022, disabling compactions & flushes 2024-12-09T13:33:49,414 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022. 2024-12-09T13:33:49,414 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022. 2024-12-09T13:33:49,414 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022. after waiting 0 ms 2024-12-09T13:33:49,414 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022. 2024-12-09T13:33:49,419 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/22b454e79ff2e5b9a8e5078c14673022/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T13:33:49,419 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/0453e2ca6eda3288a18254548a7533b3/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T13:33:49,419 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:33:49,420 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:33:49,420 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022. 
2024-12-09T13:33:49,420 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1676): Region close journal for 22b454e79ff2e5b9a8e5078c14673022: Waiting for close lock at 1733751229414Running coprocessor pre-close hooks at 1733751229414Disabling compacts and flushes for region at 1733751229414Disabling writes for close at 1733751229414Writing region close event to WAL at 1733751229415 (+1 ms)Running coprocessor post-close hooks at 1733751229419 (+4 ms)Closed at 1733751229420 (+1 ms) 2024-12-09T13:33:49,420 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3. 2024-12-09T13:33:49,420 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1676): Region close journal for 0453e2ca6eda3288a18254548a7533b3: Waiting for close lock at 1733751229413Running coprocessor pre-close hooks at 1733751229413Disabling compacts and flushes for region at 1733751229413Disabling writes for close at 1733751229413Writing region close event to WAL at 1733751229414 (+1 ms)Running coprocessor post-close hooks at 1733751229420 (+6 ms)Closed at 1733751229420 2024-12-09T13:33:49,422 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(157): Closed 22b454e79ff2e5b9a8e5078c14673022 2024-12-09T13:33:49,422 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=22b454e79ff2e5b9a8e5078c14673022, regionState=CLOSED 2024-12-09T13:33:49,423 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(157): Closed 0453e2ca6eda3288a18254548a7533b3 2024-12-09T13:33:49,424 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=0453e2ca6eda3288a18254548a7533b3, regionState=CLOSED 2024-12-09T13:33:49,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=193, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure 22b454e79ff2e5b9a8e5078c14673022, server=c48e4b8eb196,39211,1733750968235 because future has completed 2024-12-09T13:33:49,425 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=192, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0453e2ca6eda3288a18254548a7533b3, server=c48e4b8eb196,44683,1733750968041 because future has completed 2024-12-09T13:33:49,427 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=193, resume processing ppid=191 2024-12-09T13:33:49,427 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, ppid=191, state=SUCCESS, hasLock=false; CloseRegionProcedure 22b454e79ff2e5b9a8e5078c14673022, server=c48e4b8eb196,39211,1733750968235 in 164 msec 2024-12-09T13:33:49,429 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=192, resume processing ppid=190 2024-12-09T13:33:49,429 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=22b454e79ff2e5b9a8e5078c14673022, UNASSIGN in 169 msec 2024-12-09T13:33:49,429 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): 
Finished pid=192, ppid=190, state=SUCCESS, hasLock=false; CloseRegionProcedure 0453e2ca6eda3288a18254548a7533b3, server=c48e4b8eb196,44683,1733750968041 in 165 msec 2024-12-09T13:33:49,431 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=190, resume processing ppid=189 2024-12-09T13:33:49,431 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0453e2ca6eda3288a18254548a7533b3, UNASSIGN in 171 msec 2024-12-09T13:33:49,433 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=189, resume processing ppid=188 2024-12-09T13:33:49,433 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=188, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 175 msec 2024-12-09T13:33:49,435 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751229435"}]},"ts":"1733751229435"} 2024-12-09T13:33:49,436 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-12-09T13:33:49,437 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-12-09T13:33:49,439 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 185 msec 2024-12-09T13:33:49,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-09T13:33:49,573 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-09T13:33:49,574 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-12-09T13:33:49,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-09T13:33:49,576 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-09T13:33:49,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-12-09T13:33:49,577 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=194, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-09T13:33:49,580 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-12-09T13:33:49,582 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/0453e2ca6eda3288a18254548a7533b3 2024-12-09T13:33:49,584 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/22b454e79ff2e5b9a8e5078c14673022 2024-12-09T13:33:49,585 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/0453e2ca6eda3288a18254548a7533b3/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/0453e2ca6eda3288a18254548a7533b3/recovered.edits] 2024-12-09T13:33:49,587 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/22b454e79ff2e5b9a8e5078c14673022/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/22b454e79ff2e5b9a8e5078c14673022/recovered.edits] 2024-12-09T13:33:49,589 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/0453e2ca6eda3288a18254548a7533b3/cf/a0a871389813419d9b40e433b4b17bd2 to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportExpiredSnapshot/0453e2ca6eda3288a18254548a7533b3/cf/a0a871389813419d9b40e433b4b17bd2 2024-12-09T13:33:49,590 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/22b454e79ff2e5b9a8e5078c14673022/cf/c06d81d0e1714720b20f4420ca1c341b to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportExpiredSnapshot/22b454e79ff2e5b9a8e5078c14673022/cf/c06d81d0e1714720b20f4420ca1c341b 2024-12-09T13:33:49,592 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/0453e2ca6eda3288a18254548a7533b3/recovered.edits/9.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportExpiredSnapshot/0453e2ca6eda3288a18254548a7533b3/recovered.edits/9.seqid 2024-12-09T13:33:49,593 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/0453e2ca6eda3288a18254548a7533b3 2024-12-09T13:33:49,596 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/22b454e79ff2e5b9a8e5078c14673022/recovered.edits/9.seqid to 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportExpiredSnapshot/22b454e79ff2e5b9a8e5078c14673022/recovered.edits/9.seqid 2024-12-09T13:33:49,597 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportExpiredSnapshot/22b454e79ff2e5b9a8e5078c14673022 2024-12-09T13:33:49,597 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-12-09T13:33:49,600 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=194, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-09T13:33:49,604 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-12-09T13:33:49,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-09T13:33:49,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-09T13:33:49,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-09T13:33:49,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-09T13:33:49,646 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-09T13:33:49,646 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-09T13:33:49,648 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-09T13:33:49,648 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-09T13:33:49,649 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-12-09T13:33:49,651 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=194, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-09T13:33:49,651 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states. 
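Note: the DisableTableProcedure (pid=188) and DeleteTableProcedure (pid=194) entries above, together with the snapshot deletions logged just after this point, are the standard teardown for this test case. Driven from a client, the whole sequence is a handful of Admin calls; a compact sketch using the names from this log (class name invented, 'admin' obtained as in the first sketch):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class TeardownSketch {
      public static void cleanup(Admin admin) throws Exception {
        TableName tbl = TableName.valueOf("testtb-testExportExpiredSnapshot");
        if (admin.tableExists(tbl)) {
          if (admin.isTableEnabled(tbl)) {
            admin.disableTable(tbl);   // DisableTableProcedure (pid=188 above)
          }
          admin.deleteTable(tbl);      // DeleteTableProcedure (pid=194 above)
        }
        // Snapshot deletions, matching the "delete name: ..." entries below:
        admin.deleteSnapshot("emptySnaptb0-testExportExpiredSnapshot");
        admin.deleteSnapshot("snapshot-testExportExpiredSnapshot");
        admin.deleteSnapshot("snaptb0-testExportExpiredSnapshot");
      }
    }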
2024-12-09T13:33:49,651 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751229651"}]},"ts":"9223372036854775807"} 2024-12-09T13:33:49,651 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751229651"}]},"ts":"9223372036854775807"} 2024-12-09T13:33:49,654 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-09T13:33:49,654 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 0453e2ca6eda3288a18254548a7533b3, NAME => 'testtb-testExportExpiredSnapshot,,1733751216056.0453e2ca6eda3288a18254548a7533b3.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 22b454e79ff2e5b9a8e5078c14673022, NAME => 'testtb-testExportExpiredSnapshot,1,1733751216056.22b454e79ff2e5b9a8e5078c14673022.', STARTKEY => '1', ENDKEY => ''}] 2024-12-09T13:33:49,654 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted. 2024-12-09T13:33:49,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-09T13:33:49,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:49,654 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733751229654"}]},"ts":"9223372036854775807"} 2024-12-09T13:33:49,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-12-09T13:33:49,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-09T13:33:49,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:49,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-09T13:33:49,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:49,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-09T13:33:49,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:49,658 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-12-09T13:33:49,658 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:49,658 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:49,659 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:49,662 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:49,662 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=194, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-09T13:33:49,664 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 88 msec 2024-12-09T13:33:49,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-12-09T13:33:49,763 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportExpiredSnapshot 2024-12-09T13:33:49,763 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-09T13:33:49,773 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-09T13:33:49,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-12-09T13:33:49,777 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" type: DISABLED 2024-12-09T13:33:49,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-12-09T13:33:49,781 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" 
type: DISABLED 2024-12-09T13:33:49,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-12-09T13:33:49,809 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=808 (was 818), OpenFileDescriptor=793 (was 825), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=607 (was 689), ProcessCount=17 (was 17), AvailableMemoryMB=1726 (was 1541) - AvailableMemoryMB LEAK? - 2024-12-09T13:33:49,809 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=808 is superior to 500 2024-12-09T13:33:49,831 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=808, OpenFileDescriptor=793, MaxFileDescriptor=1048576, SystemLoadAverage=607, ProcessCount=17, AvailableMemoryMB=1724 2024-12-09T13:33:49,831 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=808 is superior to 500 2024-12-09T13:33:49,832 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T13:33:49,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-09T13:33:49,835 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T13:33:49,835 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:33:49,835 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 195 2024-12-09T13:33:49,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-09T13:33:49,836 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T13:33:49,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742231_1407 (size=412) 2024-12-09T13:33:49,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742231_1407 (size=412) 2024-12-09T13:33:49,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742231_1407 
(size=412) 2024-12-09T13:33:49,847 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 110ef800bda238aca57583445728de38, NAME => 'testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:33:49,848 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => c75cdc52e2771b47f43c0c2b1619a6d5, NAME => 'testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:33:49,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742232_1408 (size=73) 2024-12-09T13:33:49,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742232_1408 (size=73) 2024-12-09T13:33:49,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742232_1408 (size=73) 2024-12-09T13:33:49,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742233_1409 (size=73) 2024-12-09T13:33:49,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742233_1409 (size=73) 2024-12-09T13:33:49,870 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:33:49,870 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing c75cdc52e2771b47f43c0c2b1619a6d5, disabling compactions & flushes 2024-12-09T13:33:49,870 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5. 
2024-12-09T13:33:49,870 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5. 2024-12-09T13:33:49,870 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5. after waiting 0 ms 2024-12-09T13:33:49,870 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5. 2024-12-09T13:33:49,870 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5. 2024-12-09T13:33:49,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742233_1409 (size=73) 2024-12-09T13:33:49,870 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for c75cdc52e2771b47f43c0c2b1619a6d5: Waiting for close lock at 1733751229870Disabling compacts and flushes for region at 1733751229870Disabling writes for close at 1733751229870Writing region close event to WAL at 1733751229870Closed at 1733751229870 2024-12-09T13:33:49,871 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:33:49,871 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 110ef800bda238aca57583445728de38, disabling compactions & flushes 2024-12-09T13:33:49,871 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38. 2024-12-09T13:33:49,871 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38. 2024-12-09T13:33:49,871 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38. after waiting 0 ms 2024-12-09T13:33:49,871 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38. 2024-12-09T13:33:49,871 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38. 
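The RegionOpenAndInit entries above belong to the CREATE_TABLE_WRITE_FS_LAYOUT step: two regions split at row key '1', each with a single 'cf' family, are initialized on disk and immediately closed again. The sketch below is a minimal client-side equivalent of the create request logged at 13:33:49,832, assuming an open Admin handle; descriptor attributes beyond the family name, max versions, and split point are left at their defaults rather than copied from the log.

    // Sketch only: creating the pre-split table whose filesystem layout is written above.
    // The split point '1' and family 'cf' come from the log; other attributes are defaults.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    class CreateTableSketch {
      static void create(Admin admin) throws Exception {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testEmptyExportFileSystemState"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)  // VERSIONS => '1' in the descriptor above
                .build())
            .build();
        byte[][] splitKeys = { Bytes.toBytes("1") };  // yields regions ('', '1') and ('1', '')
        admin.createTable(desc, splitKeys);           // drives the CreateTableProcedure traced above
      }
    }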
2024-12-09T13:33:49,871 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 110ef800bda238aca57583445728de38: Waiting for close lock at 1733751229871Disabling compacts and flushes for region at 1733751229871Disabling writes for close at 1733751229871Writing region close event to WAL at 1733751229871Closed at 1733751229871 2024-12-09T13:33:49,875 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T13:33:49,875 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733751229875"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733751229875"}]},"ts":"1733751229875"} 2024-12-09T13:33:49,875 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733751229875"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733751229875"}]},"ts":"1733751229875"} 2024-12-09T13:33:49,878 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-09T13:33:49,878 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T13:33:49,879 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751229878"}]},"ts":"1733751229878"} 2024-12-09T13:33:49,880 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-12-09T13:33:49,880 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {c48e4b8eb196=0} racks are {/default-rack=0} 2024-12-09T13:33:49,883 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T13:33:49,883 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T13:33:49,883 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T13:33:49,883 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T13:33:49,883 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T13:33:49,883 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T13:33:49,883 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T13:33:49,883 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T13:33:49,883 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T13:33:49,883 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T13:33:49,884 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=195, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=110ef800bda238aca57583445728de38, ASSIGN}, {pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c75cdc52e2771b47f43c0c2b1619a6d5, ASSIGN}] 2024-12-09T13:33:49,885 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=110ef800bda238aca57583445728de38, ASSIGN 2024-12-09T13:33:49,885 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c75cdc52e2771b47f43c0c2b1619a6d5, ASSIGN 2024-12-09T13:33:49,886 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=110ef800bda238aca57583445728de38, ASSIGN; state=OFFLINE, location=c48e4b8eb196,36493,1733750968163; forceNewPlan=false, retain=false 2024-12-09T13:33:49,886 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c75cdc52e2771b47f43c0c2b1619a6d5, ASSIGN; state=OFFLINE, location=c48e4b8eb196,44683,1733750968041; forceNewPlan=false, retain=false 2024-12-09T13:33:49,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-09T13:33:50,036 INFO [c48e4b8eb196:45049 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
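At this point the master has spawned one TransitRegionStateProcedure per region and the balancer has chosen target region servers. As a hedged sketch using only public client API (the test itself later waits for assignment via HBaseTestingUtil), the code below shows how a client could wait for the assignment to complete and inspect where each region landed, assuming an open Connection and Admin.

    // Sketch only: observing, from the client side, the assignment the master performs above.
    import java.util.List;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    class AssignmentCheckSketch {
      static void waitAndPrint(Connection conn, Admin admin) throws Exception {
        TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
        while (!admin.isTableAvailable(tn)) {  // true once every region is open on some server
          Thread.sleep(100);
        }
        try (RegionLocator locator = conn.getRegionLocator(tn)) {
          List<HRegionLocation> locs = locator.getAllRegionLocations();
          for (HRegionLocation loc : locs) {
            System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
          }
        }
      }
    }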
2024-12-09T13:33:50,037 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=110ef800bda238aca57583445728de38, regionState=OPENING, regionLocation=c48e4b8eb196,36493,1733750968163 2024-12-09T13:33:50,037 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=c75cdc52e2771b47f43c0c2b1619a6d5, regionState=OPENING, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:33:50,039 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=110ef800bda238aca57583445728de38, ASSIGN because future has completed 2024-12-09T13:33:50,039 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=198, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure 110ef800bda238aca57583445728de38, server=c48e4b8eb196,36493,1733750968163}] 2024-12-09T13:33:50,040 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c75cdc52e2771b47f43c0c2b1619a6d5, ASSIGN because future has completed 2024-12-09T13:33:50,040 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure c75cdc52e2771b47f43c0c2b1619a6d5, server=c48e4b8eb196,44683,1733750968041}] 2024-12-09T13:33:50,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-09T13:33:50,196 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5. 2024-12-09T13:33:50,196 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7752): Opening region: {ENCODED => c75cdc52e2771b47f43c0c2b1619a6d5, NAME => 'testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T13:33:50,196 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38. 2024-12-09T13:33:50,196 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7752): Opening region: {ENCODED => 110ef800bda238aca57583445728de38, NAME => 'testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T13:33:50,196 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5. 
service=AccessControlService 2024-12-09T13:33:50,196 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38. service=AccessControlService 2024-12-09T13:33:50,196 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T13:33:50,196 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T13:33:50,197 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState c75cdc52e2771b47f43c0c2b1619a6d5 2024-12-09T13:33:50,197 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:33:50,197 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7794): checking encryption for c75cdc52e2771b47f43c0c2b1619a6d5 2024-12-09T13:33:50,197 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7797): checking classloading for c75cdc52e2771b47f43c0c2b1619a6d5 2024-12-09T13:33:50,200 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 110ef800bda238aca57583445728de38 2024-12-09T13:33:50,200 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:33:50,200 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7794): checking encryption for 110ef800bda238aca57583445728de38 2024-12-09T13:33:50,200 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7797): checking classloading for 110ef800bda238aca57583445728de38 2024-12-09T13:33:50,203 INFO [StoreOpener-110ef800bda238aca57583445728de38-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 110ef800bda238aca57583445728de38 2024-12-09T13:33:50,204 INFO [StoreOpener-110ef800bda238aca57583445728de38-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 110ef800bda238aca57583445728de38 columnFamilyName cf 2024-12-09T13:33:50,204 DEBUG [StoreOpener-110ef800bda238aca57583445728de38-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:33:50,205 INFO [StoreOpener-110ef800bda238aca57583445728de38-1 {}] regionserver.HStore(327): Store=110ef800bda238aca57583445728de38/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:33:50,206 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1038): replaying wal for 110ef800bda238aca57583445728de38 2024-12-09T13:33:50,208 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/110ef800bda238aca57583445728de38 2024-12-09T13:33:50,208 INFO [StoreOpener-c75cdc52e2771b47f43c0c2b1619a6d5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c75cdc52e2771b47f43c0c2b1619a6d5 2024-12-09T13:33:50,208 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/110ef800bda238aca57583445728de38 2024-12-09T13:33:50,208 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1048): stopping wal replay for 110ef800bda238aca57583445728de38 2024-12-09T13:33:50,209 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1060): Cleaning up temporary data for 110ef800bda238aca57583445728de38 2024-12-09T13:33:50,210 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1093): writing seq id for 110ef800bda238aca57583445728de38 2024-12-09T13:33:50,212 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/110ef800bda238aca57583445728de38/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:33:50,213 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1114): Opened 110ef800bda238aca57583445728de38; next sequenceid=2; 
SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74574941, jitterRate=0.11125321686267853}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:33:50,213 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 110ef800bda238aca57583445728de38 2024-12-09T13:33:50,213 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1006): Region open journal for 110ef800bda238aca57583445728de38: Running coprocessor pre-open hook at 1733751230201Writing region info on filesystem at 1733751230201Initializing all the Stores at 1733751230202 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751230202Cleaning up temporary data from old regions at 1733751230209 (+7 ms)Running coprocessor post-open hooks at 1733751230213 (+4 ms)Region opened successfully at 1733751230213 2024-12-09T13:33:50,214 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38., pid=198, masterSystemTime=1733751230192 2024-12-09T13:33:50,215 INFO [StoreOpener-c75cdc52e2771b47f43c0c2b1619a6d5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c75cdc52e2771b47f43c0c2b1619a6d5 columnFamilyName cf 2024-12-09T13:33:50,215 DEBUG [StoreOpener-c75cdc52e2771b47f43c0c2b1619a6d5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:33:50,216 INFO [StoreOpener-c75cdc52e2771b47f43c0c2b1619a6d5-1 {}] regionserver.HStore(327): Store=c75cdc52e2771b47f43c0c2b1619a6d5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:33:50,216 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1038): replaying wal for c75cdc52e2771b47f43c0c2b1619a6d5 2024-12-09T13:33:50,217 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38. 
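The region-open journal above also records the effective split policy (SteppingSplitPolicy with a jittered desiredMaxFileSize) and the compaction thresholds (minFilesToCompact:3, ratio 1.2, and so on). The sketch below lists standard configuration keys that feed those values; the key names are real HBase settings, but the values shown are illustrative and are not the ones this test run used.

    // Sketch only: site settings behind the split-policy and compaction values reported
    // in the region-open journal above. Values are illustrative, not the test's.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    class RegionTuningSketch {
      static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Default split policy class (the journal shows SteppingSplitPolicy was selected).
        conf.set("hbase.regionserver.region.split.policy",
            "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy");
        // Upper bound on region size before a split; the journal's desiredMaxFileSize
        // is derived from this plus a random jitter.
        conf.setLong("hbase.hregion.max.filesize", 10L * 1024 * 1024 * 1024);
        // Minimum number of store files before a minor compaction is considered
        // (minFilesToCompact:3 in the log).
        conf.setInt("hbase.hstore.compaction.min", 3);
        return conf;
      }
    }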
2024-12-09T13:33:50,217 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38. 2024-12-09T13:33:50,217 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/c75cdc52e2771b47f43c0c2b1619a6d5 2024-12-09T13:33:50,217 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/c75cdc52e2771b47f43c0c2b1619a6d5 2024-12-09T13:33:50,218 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=110ef800bda238aca57583445728de38, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,36493,1733750968163 2024-12-09T13:33:50,218 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1048): stopping wal replay for c75cdc52e2771b47f43c0c2b1619a6d5 2024-12-09T13:33:50,218 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1060): Cleaning up temporary data for c75cdc52e2771b47f43c0c2b1619a6d5 2024-12-09T13:33:50,220 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1093): writing seq id for c75cdc52e2771b47f43c0c2b1619a6d5 2024-12-09T13:33:50,220 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=198, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure 110ef800bda238aca57583445728de38, server=c48e4b8eb196,36493,1733750968163 because future has completed 2024-12-09T13:33:50,223 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/c75cdc52e2771b47f43c0c2b1619a6d5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:33:50,223 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1114): Opened c75cdc52e2771b47f43c0c2b1619a6d5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72674349, jitterRate=0.08293218910694122}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:33:50,224 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c75cdc52e2771b47f43c0c2b1619a6d5 2024-12-09T13:33:50,224 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1006): Region open journal for c75cdc52e2771b47f43c0c2b1619a6d5: Running coprocessor pre-open hook at 1733751230197Writing region info on filesystem at 1733751230197Initializing all the Stores at 1733751230199 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751230199Cleaning up temporary data from old regions at 1733751230218 (+19 ms)Running coprocessor post-open hooks at 1733751230224 (+6 ms)Region opened successfully at 1733751230224 2024-12-09T13:33:50,225 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5., pid=199, masterSystemTime=1733751230193 2024-12-09T13:33:50,226 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=198, resume processing ppid=196 2024-12-09T13:33:50,226 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, ppid=196, state=SUCCESS, hasLock=false; OpenRegionProcedure 110ef800bda238aca57583445728de38, server=c48e4b8eb196,36493,1733750968163 in 183 msec 2024-12-09T13:33:50,227 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5. 2024-12-09T13:33:50,227 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5. 2024-12-09T13:33:50,228 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=110ef800bda238aca57583445728de38, ASSIGN in 343 msec 2024-12-09T13:33:50,228 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=c75cdc52e2771b47f43c0c2b1619a6d5, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:33:50,230 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=199, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure c75cdc52e2771b47f43c0c2b1619a6d5, server=c48e4b8eb196,44683,1733750968041 because future has completed 2024-12-09T13:33:50,234 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=199, resume processing ppid=197 2024-12-09T13:33:50,234 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=197, state=SUCCESS, hasLock=false; OpenRegionProcedure c75cdc52e2771b47f43c0c2b1619a6d5, server=c48e4b8eb196,44683,1733750968041 in 191 msec 2024-12-09T13:33:50,236 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=197, resume processing ppid=195 2024-12-09T13:33:50,236 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c75cdc52e2771b47f43c0c2b1619a6d5, ASSIGN in 351 msec 2024-12-09T13:33:50,237 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T13:33:50,238 
DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751230237"}]},"ts":"1733751230237"} 2024-12-09T13:33:50,240 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-12-09T13:33:50,241 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T13:33:50,241 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-12-09T13:33:50,246 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-09T13:33:50,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:50,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:50,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:50,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:33:50,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-09T13:33:50,313 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:50,313 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:50,314 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:50,314 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:50,315 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:50,315 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:50,315 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:50,315 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:33:50,318 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 481 msec 2024-12-09T13:33:50,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-09T13:33:50,463 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-09T13:33:50,463 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testEmptyExportFileSystemState get assigned. Timeout = 60000ms 2024-12-09T13:33:50,463 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:33:50,466 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39211 {}] regionserver.StoreScanner(1138): Switch to stream read (scanned=32778 bytes) of info 2024-12-09T13:33:50,470 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testEmptyExportFileSystemState assigned to meta. Checking AM states. 2024-12-09T13:33:50,470 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:33:50,470 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testEmptyExportFileSystemState assigned. 2024-12-09T13:33:50,470 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T13:33:50,473 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-09T13:33:50,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733751230473 (current time:1733751230473). 
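The entries above show the master receiving a FLUSH-type snapshot request for testtb-testEmptyExportFileSystemState and SnapshotDescriptionUtils filling in the unspecified creation time, TTL, version, and owner. As a hedged sketch assuming an open Admin handle, the code below shows the client calls that correspond to this request and to the "Deleting snapshot: ..." entries earlier in the log.

    // Sketch only: the client calls behind the snapshot request logged above and the
    // earlier snapshot deletions. Names are taken from the log.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    class SnapshotSketch {
      static void takeAndCleanUp(Admin admin) throws Exception {
        TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
        // type=FLUSH in the log: memstores are flushed before file references are captured.
        admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState", tn);
        // Counterpart of the "Deleting snapshot: ..." entries seen earlier in this log.
        admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
      }
    }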
2024-12-09T13:33:50,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T13:33:50,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-09T13:33:50,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T13:33:50,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f66fc35, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:50,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:33:50,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:33:50,474 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:33:50,474 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:33:50,474 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:33:50,474 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58a1c046, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:50,474 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:33:50,475 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:33:50,475 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:50,475 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52244, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:33:50,476 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47cc13c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:50,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:33:50,477 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:33:50,478 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:33:50,479 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46124, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:33:50,480 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049. 2024-12-09T13:33:50,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:33:50,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:50,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:50,480 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T13:33:50,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73edbfa5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:50,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:33:50,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:33:50,482 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:33:50,482 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:33:50,482 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:33:50,482 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@684af087, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:50,482 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:33:50,482 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:33:50,483 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:50,484 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52262, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:33:50,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@89457dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:50,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:33:50,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:33:50,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:33:50,487 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46138, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
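The ClusterIdFetcher, ConnectionUtils, and "Call stack" entries above are routine DEBUG traces from a short-lived internal client connection the master opens to read the ACL table while validating the snapshot; they do not indicate an error. As a hedged sketch, the code below shows the public async-connection lifecycle that produces this kind of trace: the registry lookup on open and the call-stack DEBUG line on close.

    // Sketch only: the async connection open/close lifecycle whose DEBUG traces appear above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    class AsyncConnectionSketch {
      static void openAndClose() throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Opening fetches the cluster id and meta region location from the connection
        // registry, as in the ClusterIdFetcher / ConnectionUtils entries above.
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
          // ... use conn.getTable(...) here; closing the connection emits the
          // AsyncConnectionImpl "Call stack" DEBUG trace seen above.
        }
      }
    }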
2024-12-09T13:33:50,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:33:50,490 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049. 2024-12-09T13:33:50,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:33:50,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:50,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:50,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv 
[jenkins: RWXCA] 2024-12-09T13:33:50,491 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:33:50,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T13:33:50,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-09T13:33:50,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-12-09T13:33:50,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-09T13:33:50,494 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T13:33:50,495 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T13:33:50,498 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T13:33:50,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742234_1410 (size=185) 2024-12-09T13:33:50,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742234_1410 (size=185) 2024-12-09T13:33:50,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742234_1410 (size=185) 2024-12-09T13:33:50,523 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T13:33:50,524 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 110ef800bda238aca57583445728de38}, {pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c75cdc52e2771b47f43c0c2b1619a6d5}] 2024-12-09T13:33:50,525 INFO [PEWorker-1 
{}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 110ef800bda238aca57583445728de38 2024-12-09T13:33:50,525 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c75cdc52e2771b47f43c0c2b1619a6d5 2024-12-09T13:33:50,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-09T13:33:50,676 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44683 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=202 2024-12-09T13:33:50,676 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36493 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=201 2024-12-09T13:33:50,676 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5. 2024-12-09T13:33:50,676 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38. 2024-12-09T13:33:50,676 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2603): Flush status journal for c75cdc52e2771b47f43c0c2b1619a6d5: 2024-12-09T13:33:50,676 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.HRegion(2603): Flush status journal for 110ef800bda238aca57583445728de38: 2024-12-09T13:33:50,676 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-09T13:33:50,676 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-09T13:33:50,676 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-09T13:33:50,676 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:33:50,676 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T13:33:50,676 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-09T13:33:50,676 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:33:50,676 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T13:33:50,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742236_1412 (size=76) 2024-12-09T13:33:50,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742236_1412 (size=76) 2024-12-09T13:33:50,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742236_1412 (size=76) 2024-12-09T13:33:50,688 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5. 
2024-12-09T13:33:50,688 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=202 2024-12-09T13:33:50,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=202 2024-12-09T13:33:50,689 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region c75cdc52e2771b47f43c0c2b1619a6d5 2024-12-09T13:33:50,689 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c75cdc52e2771b47f43c0c2b1619a6d5 2024-12-09T13:33:50,691 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c75cdc52e2771b47f43c0c2b1619a6d5 in 166 msec 2024-12-09T13:33:50,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742235_1411 (size=76) 2024-12-09T13:33:50,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742235_1411 (size=76) 2024-12-09T13:33:50,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742235_1411 (size=76) 2024-12-09T13:33:50,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38. 
2024-12-09T13:33:50,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=201 2024-12-09T13:33:50,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=201 2024-12-09T13:33:50,697 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 110ef800bda238aca57583445728de38 2024-12-09T13:33:50,697 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 110ef800bda238aca57583445728de38 2024-12-09T13:33:50,701 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=201, resume processing ppid=200 2024-12-09T13:33:50,701 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T13:33:50,701 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 110ef800bda238aca57583445728de38 in 175 msec 2024-12-09T13:33:50,702 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T13:33:50,702 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T13:33:50,702 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-12-09T13:33:50,703 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-09T13:33:50,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742237_1413 (size=567) 2024-12-09T13:33:50,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742237_1413 (size=567) 2024-12-09T13:33:50,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742237_1413 (size=567) 2024-12-09T13:33:50,711 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState 
type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T13:33:50,714 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T13:33:50,715 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-09T13:33:50,716 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T13:33:50,716 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-12-09T13:33:50,717 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 225 msec 2024-12-09T13:33:50,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-09T13:33:50,813 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-09T13:33:50,816 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='043907f7cd97fdca4e82d2f941d74e8a0', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38., hostname=c48e4b8eb196,36493,1733750968163, seqNum=2] 2024-12-09T13:33:50,818 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='1f49374d5cc96c271551d39be1d0adea7', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5., hostname=c48e4b8eb196,44683,1733750968041, seqNum=2] 2024-12-09T13:33:50,819 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='2568874d29e469c54365e914e7998a9d7', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5., hostname=c48e4b8eb196,44683,1733750968041, seqNum=2] 2024-12-09T13:33:50,819 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='32c480ded3e8742c8b35ee22a524f0982', 
locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5., hostname=c48e4b8eb196,44683,1733750968041, seqNum=2] 2024-12-09T13:33:50,821 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='5ef4da181736b9dd349cef04fe3ebd3df', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5., hostname=c48e4b8eb196,44683,1733750968041, seqNum=2] 2024-12-09T13:33:50,822 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='42d2c1097e8fdfaaf2f19f06d0a782467', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5., hostname=c48e4b8eb196,44683,1733750968041, seqNum=2] 2024-12-09T13:33:50,822 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36493 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T13:33:50,824 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T13:33:50,825 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T13:33:50,827 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-09T13:33:50,827 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38. 2024-12-09T13:33:50,827 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:33:50,829 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T13:33:50,834 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T13:33:50,840 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-09T13:33:50,843 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-09T13:33:50,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733751230843 (current time:1733751230843). 
2024-12-09T13:33:50,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T13:33:50,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-09T13:33:50,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T13:33:50,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62d3ba7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:50,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:33:50,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:33:50,844 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:33:50,844 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:33:50,844 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:33:50,844 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59b89974, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:50,844 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:33:50,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:33:50,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:50,846 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52270, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:33:50,846 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8325a77, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:50,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:33:50,847 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:33:50,847 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:33:50,848 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46154, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:33:50,849 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049. 2024-12-09T13:33:50,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:33:50,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:50,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:50,849 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T13:33:50,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@298fd01b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:50,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:33:50,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:33:50,850 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:33:50,851 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:33:50,851 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:33:50,851 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2933d268, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:50,851 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:33:50,851 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:33:50,851 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:50,852 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52286, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:33:50,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f52f933, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:33:50,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:33:50,853 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:33:50,854 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:33:50,855 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46166, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-09T13:33:50,856 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:33:50,857 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049. 2024-12-09T13:33:50,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:33:50,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:50,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:33:50,857 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T13:33:50,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-09T13:33:50,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T13:33:50,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-09T13:33:50,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-09T13:33:50,860 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T13:33:50,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-09T13:33:50,861 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T13:33:50,863 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T13:33:50,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742238_1414 (size=180) 2024-12-09T13:33:50,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742238_1414 (size=180) 2024-12-09T13:33:50,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742238_1414 (size=180) 2024-12-09T13:33:50,868 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T13:33:50,868 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 110ef800bda238aca57583445728de38}, {pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c75cdc52e2771b47f43c0c2b1619a6d5}] 2024-12-09T13:33:50,869 INFO [PEWorker-1 
{}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 110ef800bda238aca57583445728de38 2024-12-09T13:33:50,869 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c75cdc52e2771b47f43c0c2b1619a6d5 2024-12-09T13:33:50,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-09T13:33:51,021 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44683 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-12-09T13:33:51,021 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36493 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-12-09T13:33:51,021 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5. 2024-12-09T13:33:51,021 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38. 2024-12-09T13:33:51,021 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2902): Flushing 110ef800bda238aca57583445728de38 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-09T13:33:51,021 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2902): Flushing c75cdc52e2771b47f43c0c2b1619a6d5 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-09T13:33:51,038 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/110ef800bda238aca57583445728de38/.tmp/cf/7ddd14fe0f864d089026679391f4c0de is 71, key is 02c4d4b8c2a9574acaa7f809de513f7a/cf:q/1733751230822/Put/seqid=0 2024-12-09T13:33:51,043 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/c75cdc52e2771b47f43c0c2b1619a6d5/.tmp/cf/f8105c2790764628b4c3b091e641dea3 is 71, key is 11bd4692b5e19ffe460627219f235b7b/cf:q/1733751230824/Put/seqid=0 2024-12-09T13:33:51,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742239_1415 (size=5216) 2024-12-09T13:33:51,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742239_1415 (size=5216) 2024-12-09T13:33:51,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742239_1415 (size=5216) 
2024-12-09T13:33:51,058 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/110ef800bda238aca57583445728de38/.tmp/cf/7ddd14fe0f864d089026679391f4c0de 2024-12-09T13:33:51,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/110ef800bda238aca57583445728de38/.tmp/cf/7ddd14fe0f864d089026679391f4c0de as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/110ef800bda238aca57583445728de38/cf/7ddd14fe0f864d089026679391f4c0de 2024-12-09T13:33:51,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742240_1416 (size=8392) 2024-12-09T13:33:51,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742240_1416 (size=8392) 2024-12-09T13:33:51,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742240_1416 (size=8392) 2024-12-09T13:33:51,068 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/c75cdc52e2771b47f43c0c2b1619a6d5/.tmp/cf/f8105c2790764628b4c3b091e641dea3 2024-12-09T13:33:51,071 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/110ef800bda238aca57583445728de38/cf/7ddd14fe0f864d089026679391f4c0de, entries=2, sequenceid=6, filesize=5.1 K 2024-12-09T13:33:51,072 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 110ef800bda238aca57583445728de38 in 51ms, sequenceid=6, compaction requested=false 2024-12-09T13:33:51,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-09T13:33:51,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2603): Flush status journal for 110ef800bda238aca57583445728de38: 2024-12-09T13:33:51,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38. for snaptb0-testEmptyExportFileSystemState completed. 
2024-12-09T13:33:51,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-09T13:33:51,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:33:51,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/110ef800bda238aca57583445728de38/cf/7ddd14fe0f864d089026679391f4c0de] hfiles 2024-12-09T13:33:51,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/110ef800bda238aca57583445728de38/cf/7ddd14fe0f864d089026679391f4c0de for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-09T13:33:51,075 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/c75cdc52e2771b47f43c0c2b1619a6d5/.tmp/cf/f8105c2790764628b4c3b091e641dea3 as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/c75cdc52e2771b47f43c0c2b1619a6d5/cf/f8105c2790764628b4c3b091e641dea3 2024-12-09T13:33:51,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742241_1417 (size=115) 2024-12-09T13:33:51,080 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/c75cdc52e2771b47f43c0c2b1619a6d5/cf/f8105c2790764628b4c3b091e641dea3, entries=48, sequenceid=6, filesize=8.2 K 2024-12-09T13:33:51,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38. 
2024-12-09T13:33:51,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742241_1417 (size=115) 2024-12-09T13:33:51,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-12-09T13:33:51,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742241_1417 (size=115) 2024-12-09T13:33:51,081 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for c75cdc52e2771b47f43c0c2b1619a6d5 in 60ms, sequenceid=6, compaction requested=false 2024-12-09T13:33:51,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2603): Flush status journal for c75cdc52e2771b47f43c0c2b1619a6d5: 2024-12-09T13:33:51,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-09T13:33:51,082 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-09T13:33:51,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=204 2024-12-09T13:33:51,082 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:33:51,082 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 110ef800bda238aca57583445728de38 2024-12-09T13:33:51,082 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/c75cdc52e2771b47f43c0c2b1619a6d5/cf/f8105c2790764628b4c3b091e641dea3] hfiles 2024-12-09T13:33:51,082 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/c75cdc52e2771b47f43c0c2b1619a6d5/cf/f8105c2790764628b4c3b091e641dea3 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-09T13:33:51,082 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 110ef800bda238aca57583445728de38 2024-12-09T13:33:51,084 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 
110ef800bda238aca57583445728de38 in 215 msec 2024-12-09T13:33:51,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742242_1418 (size=115) 2024-12-09T13:33:51,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742242_1418 (size=115) 2024-12-09T13:33:51,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742242_1418 (size=115) 2024-12-09T13:33:51,093 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5. 2024-12-09T13:33:51,093 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-12-09T13:33:51,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=205 2024-12-09T13:33:51,093 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region c75cdc52e2771b47f43c0c2b1619a6d5 2024-12-09T13:33:51,094 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c75cdc52e2771b47f43c0c2b1619a6d5 2024-12-09T13:33:51,096 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=205, resume processing ppid=203 2024-12-09T13:33:51,096 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T13:33:51,096 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c75cdc52e2771b47f43c0c2b1619a6d5 in 226 msec 2024-12-09T13:33:51,097 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T13:33:51,098 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T13:33:51,098 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-12-09T13:33:51,098 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 
2024-12-09T13:33:51,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742243_1419 (size=645) 2024-12-09T13:33:51,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742243_1419 (size=645) 2024-12-09T13:33:51,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742243_1419 (size=645) 2024-12-09T13:33:51,115 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T13:33:51,121 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T13:33:51,121 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-12-09T13:33:51,122 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T13:33:51,122 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-09T13:33:51,124 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 264 msec 2024-12-09T13:33:51,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-09T13:33:51,173 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-09T13:33:51,173 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751231173 2024-12-09T13:33:51,173 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:45819, tgtDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751231173, 
rawTgtDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751231173, srcFsUri=hdfs://localhost:45819, srcDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:33:51,203 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:45819, inputRoot=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:33:51,203 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751231173, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751231173/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-09T13:33:51,204 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-09T13:33:51,209 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751231173/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-09T13:33:51,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742244_1420 (size=185) 2024-12-09T13:33:51,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742244_1420 (size=185) 2024-12-09T13:33:51,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742244_1420 (size=185) 2024-12-09T13:33:51,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742245_1421 (size=567) 2024-12-09T13:33:51,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742245_1421 (size=567) 2024-12-09T13:33:51,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742245_1421 (size=567) 2024-12-09T13:33:51,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:33:51,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:33:51,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:33:52,195 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop-10455723104048553271.jar 2024-12-09T13:33:52,195 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:33:52,195 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:33:52,255 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop-15214166997159414072.jar 2024-12-09T13:33:52,255 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:33:52,256 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:33:52,256 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:33:52,256 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:33:52,257 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:33:52,257 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:33:52,257 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-09T13:33:52,257 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 
2024-12-09T13:33:52,257 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-09T13:33:52,258 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-09T13:33:52,258 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-09T13:33:52,258 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-09T13:33:52,258 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-09T13:33:52,258 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-09T13:33:52,258 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-09T13:33:52,259 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-09T13:33:52,259 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-09T13:33:52,259 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:33:52,260 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:33:52,260 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:33:52,260 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:33:52,260 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:33:52,261 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:33:52,261 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:33:52,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742246_1422 (size=5175431) 2024-12-09T13:33:52,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742246_1422 (size=5175431) 2024-12-09T13:33:52,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742246_1422 (size=5175431) 2024-12-09T13:33:52,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742247_1423 (size=1877034) 2024-12-09T13:33:52,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742247_1423 (size=1877034) 2024-12-09T13:33:52,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742247_1423 (size=1877034) 2024-12-09T13:33:52,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742248_1424 (size=20406) 2024-12-09T13:33:52,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742248_1424 (size=20406) 2024-12-09T13:33:52,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742248_1424 (size=20406) 2024-12-09T13:33:52,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742249_1425 (size=24096) 2024-12-09T13:33:52,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742249_1425 (size=24096) 2024-12-09T13:33:52,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742249_1425 (size=24096) 
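The "For class ..., using jar ..." lines are TableMapReduceUtil resolving which jar supplies each class the ExportSnapshot MapReduce job will need; the addStoredBlock records around them are those jars being written into the job's HDFS staging area. A hedged sketch of how a client job typically requests this shipping (the job name below is illustrative and job submission is omitted):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class ShipDependencyJars {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-snapshot-example"); // illustrative name
    // Resolves the jar that provides each required HBase/Hadoop class
    // (the "For class ..., using jar ..." lines) and adds those jars to the
    // job's distributed cache, so they are uploaded to the HDFS staging dir.
    TableMapReduceUtil.addDependencyJars(job);
    // job.submit() / waitForCompletion(...) would follow in a real job.
  }
}
```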
2024-12-09T13:33:52,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742250_1426 (size=503880) 2024-12-09T13:33:52,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742250_1426 (size=503880) 2024-12-09T13:33:52,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742250_1426 (size=503880) 2024-12-09T13:33:52,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742251_1427 (size=4695811) 2024-12-09T13:33:52,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742251_1427 (size=4695811) 2024-12-09T13:33:52,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742251_1427 (size=4695811) 2024-12-09T13:33:52,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742252_1428 (size=111872) 2024-12-09T13:33:52,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742252_1428 (size=111872) 2024-12-09T13:33:52,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742252_1428 (size=111872) 2024-12-09T13:33:52,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742253_1429 (size=1597213) 2024-12-09T13:33:52,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742253_1429 (size=1597213) 2024-12-09T13:33:52,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742253_1429 (size=1597213) 2024-12-09T13:33:52,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742254_1430 (size=4188619) 2024-12-09T13:33:52,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742254_1430 (size=4188619) 2024-12-09T13:33:52,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742254_1430 (size=4188619) 2024-12-09T13:33:52,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742255_1431 (size=127628) 2024-12-09T13:33:52,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742255_1431 (size=127628) 2024-12-09T13:33:52,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742255_1431 (size=127628) 2024-12-09T13:33:52,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742256_1432 (size=29229) 2024-12-09T13:33:52,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742256_1432 
(size=29229) 2024-12-09T13:33:52,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742256_1432 (size=29229) 2024-12-09T13:33:52,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742257_1433 (size=45609) 2024-12-09T13:33:52,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742257_1433 (size=45609) 2024-12-09T13:33:52,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742257_1433 (size=45609) 2024-12-09T13:33:52,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742258_1434 (size=8360360) 2024-12-09T13:33:52,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742258_1434 (size=8360360) 2024-12-09T13:33:52,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742258_1434 (size=8360360) 2024-12-09T13:33:52,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742259_1435 (size=1323991) 2024-12-09T13:33:52,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742259_1435 (size=1323991) 2024-12-09T13:33:52,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742259_1435 (size=1323991) 2024-12-09T13:33:53,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742260_1436 (size=232957) 2024-12-09T13:33:53,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742260_1436 (size=232957) 2024-12-09T13:33:53,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742260_1436 (size=232957) 2024-12-09T13:33:53,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742261_1437 (size=217634) 2024-12-09T13:33:53,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742261_1437 (size=217634) 2024-12-09T13:33:53,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742261_1437 (size=217634) 2024-12-09T13:33:53,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742262_1438 (size=903930) 2024-12-09T13:33:53,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742262_1438 (size=903930) 2024-12-09T13:33:53,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742262_1438 (size=903930) 2024-12-09T13:33:53,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to 
blk_1073742263_1439 (size=77835) 2024-12-09T13:33:53,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742263_1439 (size=77835) 2024-12-09T13:33:53,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742263_1439 (size=77835) 2024-12-09T13:33:53,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742264_1440 (size=1832290) 2024-12-09T13:33:53,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742264_1440 (size=1832290) 2024-12-09T13:33:53,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742264_1440 (size=1832290) 2024-12-09T13:33:53,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742265_1441 (size=30949) 2024-12-09T13:33:53,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742265_1441 (size=30949) 2024-12-09T13:33:53,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742265_1441 (size=30949) 2024-12-09T13:33:53,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742266_1442 (size=6425017) 2024-12-09T13:33:53,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742266_1442 (size=6425017) 2024-12-09T13:33:53,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742266_1442 (size=6425017) 2024-12-09T13:33:53,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742267_1443 (size=136454) 2024-12-09T13:33:53,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742267_1443 (size=136454) 2024-12-09T13:33:53,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742267_1443 (size=136454) 2024-12-09T13:33:53,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742268_1444 (size=322274) 2024-12-09T13:33:53,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742268_1444 (size=322274) 2024-12-09T13:33:53,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742268_1444 (size=322274) 2024-12-09T13:33:53,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742269_1445 (size=131440) 2024-12-09T13:33:53,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742269_1445 (size=131440) 2024-12-09T13:33:53,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is 
added to blk_1073742269_1445 (size=131440) 2024-12-09T13:33:53,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742270_1446 (size=443172) 2024-12-09T13:33:53,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742270_1446 (size=443172) 2024-12-09T13:33:53,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742270_1446 (size=443172) 2024-12-09T13:33:53,170 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-09T13:33:53,171 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-12-09T13:33:53,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742271_1447 (size=7) 2024-12-09T13:33:53,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742271_1447 (size=7) 2024-12-09T13:33:53,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742271_1447 (size=7) 2024-12-09T13:33:53,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742272_1448 (size=10) 2024-12-09T13:33:53,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742272_1448 (size=10) 2024-12-09T13:33:53,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742272_1448 (size=10) 2024-12-09T13:33:53,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742273_1449 (size=304009) 2024-12-09T13:33:53,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742273_1449 (size=304009) 2024-12-09T13:33:53,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742273_1449 (size=304009) 2024-12-09T13:33:53,214 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T13:33:53,214 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-09T13:33:53,331 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0008_000001 (auth:SIMPLE) from 127.0.0.1:52702 2024-12-09T13:33:54,949 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T13:33:56,022 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T13:33:57,404 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0008_000001 (auth:SIMPLE) from 127.0.0.1:42856 2024-12-09T13:33:57,484 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-09T13:33:57,484 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-09T13:33:57,485 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-09T13:33:57,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742274_1450 (size=349683) 2024-12-09T13:33:57,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742274_1450 (size=349683) 2024-12-09T13:33:57,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742274_1450 (size=349683) 2024-12-09T13:33:58,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742275_1451 (size=8568) 2024-12-09T13:33:58,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742275_1451 (size=8568) 2024-12-09T13:33:58,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742275_1451 (size=8568) 2024-12-09T13:33:58,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742276_1452 (size=460) 2024-12-09T13:33:58,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742276_1452 (size=460) 2024-12-09T13:33:58,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742276_1452 (size=460) 2024-12-09T13:33:58,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742277_1453 (size=8568) 2024-12-09T13:33:58,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742277_1453 (size=8568) 2024-12-09T13:33:58,513 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742277_1453 (size=8568) 2024-12-09T13:33:58,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742278_1454 (size=349683) 2024-12-09T13:33:58,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742278_1454 (size=349683) 2024-12-09T13:33:58,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742278_1454 (size=349683) 2024-12-09T13:34:00,303 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-09T13:34:00,304 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-09T13:34:00,308 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-12-09T13:34:00,308 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-09T13:34:00,308 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-09T13:34:00,308 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-09T13:34:00,309 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-09T13:34:00,309 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-09T13:34:00,309 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751231173/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751231173/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-09T13:34:00,309 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751231173/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-09T13:34:00,309 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751231173/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-09T13:34:00,313 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 
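By this point the export of emptySnaptb0-testEmptyExportFileSystemState into export-1733751231173 has been finalized and re-verified, the exported .snapshotinfo and data.manifest have been listed, and the test asks the master to disable the table (the DisableTableProcedure that follows). A minimal sketch of driving ExportSnapshot the way a client would, assuming the commonly documented -snapshot/-copy-to options; the target URI is a placeholder, not a value from this log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExportSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the snapshot manifest plus any referenced hfiles to the target
    // filesystem via a MapReduce job, then verifies the exported copy.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
        "-copy-to", "hdfs://target-cluster:8020/hbase-exports" // placeholder URI
    });
    System.exit(rc);
  }
}
```

This is the MapReduce job whose dependency-jar staging and scheduler warnings appear earlier in the log.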
2024-12-09T13:34:00,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=206, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-09T13:34:00,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-09T13:34:00,316 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751240316"}]},"ts":"1733751240316"} 2024-12-09T13:34:00,318 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-12-09T13:34:00,318 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-12-09T13:34:00,319 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-12-09T13:34:00,320 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=110ef800bda238aca57583445728de38, UNASSIGN}, {pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c75cdc52e2771b47f43c0c2b1619a6d5, UNASSIGN}] 2024-12-09T13:34:00,321 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c75cdc52e2771b47f43c0c2b1619a6d5, UNASSIGN 2024-12-09T13:34:00,321 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=110ef800bda238aca57583445728de38, UNASSIGN 2024-12-09T13:34:00,322 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=c75cdc52e2771b47f43c0c2b1619a6d5, regionState=CLOSING, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:34:00,322 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=110ef800bda238aca57583445728de38, regionState=CLOSING, regionLocation=c48e4b8eb196,36493,1733750968163 2024-12-09T13:34:00,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=110ef800bda238aca57583445728de38, UNASSIGN because future has completed 2024-12-09T13:34:00,323 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:34:00,323 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=210, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure 
110ef800bda238aca57583445728de38, server=c48e4b8eb196,36493,1733750968163}] 2024-12-09T13:34:00,324 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c75cdc52e2771b47f43c0c2b1619a6d5, UNASSIGN because future has completed 2024-12-09T13:34:00,324 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:34:00,324 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=211, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure c75cdc52e2771b47f43c0c2b1619a6d5, server=c48e4b8eb196,44683,1733750968041}] 2024-12-09T13:34:00,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-09T13:34:00,477 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(122): Close 110ef800bda238aca57583445728de38 2024-12-09T13:34:00,477 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:34:00,478 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(122): Close c75cdc52e2771b47f43c0c2b1619a6d5 2024-12-09T13:34:00,478 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1722): Closing 110ef800bda238aca57583445728de38, disabling compactions & flushes 2024-12-09T13:34:00,478 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:34:00,478 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38. 2024-12-09T13:34:00,478 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38. 2024-12-09T13:34:00,478 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1722): Closing c75cdc52e2771b47f43c0c2b1619a6d5, disabling compactions & flushes 2024-12-09T13:34:00,478 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38. after waiting 0 ms 2024-12-09T13:34:00,479 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38. 
2024-12-09T13:34:00,479 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5. 2024-12-09T13:34:00,479 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5. 2024-12-09T13:34:00,479 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5. after waiting 0 ms 2024-12-09T13:34:00,479 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5. 2024-12-09T13:34:00,484 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/110ef800bda238aca57583445728de38/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T13:34:00,484 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/c75cdc52e2771b47f43c0c2b1619a6d5/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T13:34:00,485 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:34:00,485 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:34:00,485 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38. 2024-12-09T13:34:00,485 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5. 
2024-12-09T13:34:00,485 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1676): Region close journal for c75cdc52e2771b47f43c0c2b1619a6d5: Waiting for close lock at 1733751240478Running coprocessor pre-close hooks at 1733751240478Disabling compacts and flushes for region at 1733751240478Disabling writes for close at 1733751240479 (+1 ms)Writing region close event to WAL at 1733751240480 (+1 ms)Running coprocessor post-close hooks at 1733751240485 (+5 ms)Closed at 1733751240485 2024-12-09T13:34:00,485 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1676): Region close journal for 110ef800bda238aca57583445728de38: Waiting for close lock at 1733751240478Running coprocessor pre-close hooks at 1733751240478Disabling compacts and flushes for region at 1733751240478Disabling writes for close at 1733751240478Writing region close event to WAL at 1733751240480 (+2 ms)Running coprocessor post-close hooks at 1733751240485 (+5 ms)Closed at 1733751240485 2024-12-09T13:34:00,486 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(157): Closed 110ef800bda238aca57583445728de38 2024-12-09T13:34:00,487 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=110ef800bda238aca57583445728de38, regionState=CLOSED 2024-12-09T13:34:00,487 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(157): Closed c75cdc52e2771b47f43c0c2b1619a6d5 2024-12-09T13:34:00,488 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=c75cdc52e2771b47f43c0c2b1619a6d5, regionState=CLOSED 2024-12-09T13:34:00,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=210, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure 110ef800bda238aca57583445728de38, server=c48e4b8eb196,36493,1733750968163 because future has completed 2024-12-09T13:34:00,491 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=210, resume processing ppid=208 2024-12-09T13:34:00,491 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, ppid=208, state=SUCCESS, hasLock=false; CloseRegionProcedure 110ef800bda238aca57583445728de38, server=c48e4b8eb196,36493,1733750968163 in 166 msec 2024-12-09T13:34:00,491 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=211, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure c75cdc52e2771b47f43c0c2b1619a6d5, server=c48e4b8eb196,44683,1733750968041 because future has completed 2024-12-09T13:34:00,492 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=110ef800bda238aca57583445728de38, UNASSIGN in 171 msec 2024-12-09T13:34:00,493 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=211, resume processing ppid=209 2024-12-09T13:34:00,493 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=211, ppid=209, state=SUCCESS, hasLock=false; CloseRegionProcedure c75cdc52e2771b47f43c0c2b1619a6d5, server=c48e4b8eb196,44683,1733750968041 in 168 msec 2024-12-09T13:34:00,494 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=209, resume processing ppid=207 2024-12-09T13:34:00,494 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c75cdc52e2771b47f43c0c2b1619a6d5, UNASSIGN in 173 msec 2024-12-09T13:34:00,496 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=207, resume processing ppid=206 2024-12-09T13:34:00,496 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=206, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 177 msec 2024-12-09T13:34:00,497 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751240497"}]},"ts":"1733751240497"} 2024-12-09T13:34:00,498 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-12-09T13:34:00,498 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-12-09T13:34:00,500 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 185 msec 2024-12-09T13:34:00,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-09T13:34:00,633 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-09T13:34:00,634 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-12-09T13:34:00,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-09T13:34:00,636 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-09T13:34:00,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-12-09T13:34:00,636 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=212, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-09T13:34:00,638 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-12-09T13:34:00,640 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/c75cdc52e2771b47f43c0c2b1619a6d5 2024-12-09T13:34:00,640 DEBUG [HFileArchiver-23 {}] 
backup.HFileArchiver(131): ARCHIVING hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/110ef800bda238aca57583445728de38 2024-12-09T13:34:00,642 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/110ef800bda238aca57583445728de38/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/110ef800bda238aca57583445728de38/recovered.edits] 2024-12-09T13:34:00,642 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/c75cdc52e2771b47f43c0c2b1619a6d5/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/c75cdc52e2771b47f43c0c2b1619a6d5/recovered.edits] 2024-12-09T13:34:00,645 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/c75cdc52e2771b47f43c0c2b1619a6d5/cf/f8105c2790764628b4c3b091e641dea3 to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testEmptyExportFileSystemState/c75cdc52e2771b47f43c0c2b1619a6d5/cf/f8105c2790764628b4c3b091e641dea3 2024-12-09T13:34:00,645 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/110ef800bda238aca57583445728de38/cf/7ddd14fe0f864d089026679391f4c0de to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testEmptyExportFileSystemState/110ef800bda238aca57583445728de38/cf/7ddd14fe0f864d089026679391f4c0de 2024-12-09T13:34:00,647 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/c75cdc52e2771b47f43c0c2b1619a6d5/recovered.edits/9.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testEmptyExportFileSystemState/c75cdc52e2771b47f43c0c2b1619a6d5/recovered.edits/9.seqid 2024-12-09T13:34:00,647 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/110ef800bda238aca57583445728de38/recovered.edits/9.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testEmptyExportFileSystemState/110ef800bda238aca57583445728de38/recovered.edits/9.seqid 2024-12-09T13:34:00,648 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/c75cdc52e2771b47f43c0c2b1619a6d5 2024-12-09T13:34:00,648 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testEmptyExportFileSystemState/110ef800bda238aca57583445728de38 2024-12-09T13:34:00,648 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-12-09T13:34:00,650 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=212, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-09T13:34:00,653 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-12-09T13:34:00,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-09T13:34:00,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-09T13:34:00,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-09T13:34:00,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-09T13:34:00,704 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-09T13:34:00,704 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-09T13:34:00,704 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-09T13:34:00,705 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-12-09T13:34:00,706 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=212, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-09T13:34:00,706 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states. 
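The records above and just below trace the teardown for testEmptyExportFileSystemState: a DisableTableProcedure (pid=206), then a DeleteTableProcedure (pid=212) that archives the region HFiles and scrubs hbase:meta and the ACL znode, followed by deletion of the two snapshots. As a hedged point of reference only (the test source itself is not shown in this log), the client-side Admin calls that drive such a sequence would look roughly like the sketch below; the table and snapshot names are taken from the log, while the connection setup is an assumption.

```java
// Hedged sketch of the client-side teardown that would produce the procedure activity above.
// Only the table/snapshot names come from the log; everything else is assumed boilerplate.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TeardownExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();            // picks up hbase-site.xml
    TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.disableTable(table);   // master runs DisableTableProcedure (pid=206 above)
      admin.deleteTable(table);    // DeleteTableProcedure (pid=212): HFile archiving + meta/ACL cleanup
      // Snapshot deletions recorded a little further down in the log:
      admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
      admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
    }
  }
}
```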
2024-12-09T13:34:00,707 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751240706"}]},"ts":"9223372036854775807"} 2024-12-09T13:34:00,707 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751240706"}]},"ts":"9223372036854775807"} 2024-12-09T13:34:00,709 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-09T13:34:00,709 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 110ef800bda238aca57583445728de38, NAME => 'testtb-testEmptyExportFileSystemState,,1733751229832.110ef800bda238aca57583445728de38.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => c75cdc52e2771b47f43c0c2b1619a6d5, NAME => 'testtb-testEmptyExportFileSystemState,1,1733751229832.c75cdc52e2771b47f43c0c2b1619a6d5.', STARTKEY => '1', ENDKEY => ''}] 2024-12-09T13:34:00,709 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 2024-12-09T13:34:00,709 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733751240709"}]},"ts":"9223372036854775807"} 2024-12-09T13:34:00,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-09T13:34:00,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-09T13:34:00,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-09T13:34:00,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:34:00,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:34:00,711 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data null 2024-12-09T13:34:00,711 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-09T13:34:00,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/acl 2024-12-09T13:34:00,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:34:00,711 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-12-09T13:34:00,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=212 2024-12-09T13:34:00,712 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=212, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-09T13:34:00,712 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:34:00,712 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:34:00,712 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:34:00,712 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:34:00,713 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=212, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 78 msec 2024-12-09T13:34:00,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=212 2024-12-09T13:34:00,823 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-12-09T13:34:00,823 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-09T13:34:00,831 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-09T13:34:00,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-12-09T13:34:00,835 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-09T13:34:00,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-12-09T13:34:00,857 INFO [Time-limited test {}] 
hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=821 (was 808) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2101290072_1 at /127.0.0.1:45384 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 149304) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (294847031) connection to localhost/127.0.0.1:34919 from appattempt_1733750974572_0008_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:33210 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:45444 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:47464 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (294847031) connection to localhost/127.0.0.1:39057 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39057 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2101290072_1 at /127.0.0.1:33188 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-6805 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) - Thread LEAK? -, OpenFileDescriptor=825 (was 793) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=528 (was 607), ProcessCount=17 (was 17), AvailableMemoryMB=1281 (was 1724) 2024-12-09T13:34:00,857 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=821 is superior to 500 2024-12-09T13:34:00,874 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=821, OpenFileDescriptor=825, MaxFileDescriptor=1048576, SystemLoadAverage=528, ProcessCount=18, AvailableMemoryMB=1279 2024-12-09T13:34:00,874 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=821 is superior to 500 2024-12-09T13:34:00,875 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T13:34:00,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-12-09T13:34:00,877 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T13:34:00,877 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:34:00,877 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 213 2024-12-09T13:34:00,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-09T13:34:00,878 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T13:34:00,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742279_1455 (size=404) 2024-12-09T13:34:00,887 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742279_1455 (size=404) 2024-12-09T13:34:00,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742279_1455 (size=404) 2024-12-09T13:34:00,889 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 9178341f170b995441dfe37d963f910d, NAME => 'testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:34:00,889 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 715af21e3a0a4ff556e6e5f0007c4552, NAME => 'testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:34:00,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742281_1457 (size=65) 2024-12-09T13:34:00,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742281_1457 (size=65) 2024-12-09T13:34:00,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742281_1457 (size=65) 2024-12-09T13:34:00,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742280_1456 (size=65) 2024-12-09T13:34:00,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742280_1456 (size=65) 2024-12-09T13:34:00,897 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:34:00,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742280_1456 (size=65) 2024-12-09T13:34:00,897 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing 715af21e3a0a4ff556e6e5f0007c4552, 
disabling compactions & flushes 2024-12-09T13:34:00,897 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:34:00,897 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552. 2024-12-09T13:34:00,897 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing 9178341f170b995441dfe37d963f910d, disabling compactions & flushes 2024-12-09T13:34:00,897 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552. 2024-12-09T13:34:00,897 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d. 2024-12-09T13:34:00,897 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d. 2024-12-09T13:34:00,897 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552. after waiting 0 ms 2024-12-09T13:34:00,897 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d. after waiting 0 ms 2024-12-09T13:34:00,897 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d. 2024-12-09T13:34:00,897 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552. 2024-12-09T13:34:00,897 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d. 2024-12-09T13:34:00,897 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552. 
2024-12-09T13:34:00,897 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for 9178341f170b995441dfe37d963f910d: Waiting for close lock at 1733751240897Disabling compacts and flushes for region at 1733751240897Disabling writes for close at 1733751240897Writing region close event to WAL at 1733751240897Closed at 1733751240897 2024-12-09T13:34:00,897 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for 715af21e3a0a4ff556e6e5f0007c4552: Waiting for close lock at 1733751240897Disabling compacts and flushes for region at 1733751240897Disabling writes for close at 1733751240897Writing region close event to WAL at 1733751240897Closed at 1733751240897 2024-12-09T13:34:00,898 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T13:34:00,899 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733751240898"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733751240898"}]},"ts":"1733751240898"} 2024-12-09T13:34:00,899 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733751240898"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733751240898"}]},"ts":"1733751240898"} 2024-12-09T13:34:00,901 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
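The CreateTableProcedure records above print the full table descriptor for testtb-testExportWithChecksum (a single 'cf' family, VERSIONS=1, ROW bloom filter, 64 KB blocks, REGION_REPLICATION=1) and show two regions being initialized with a split at row key '1'. For illustration only, a client-side sketch that builds an equivalent descriptor through the public API might look as follows; the attribute values are read off the log, the surrounding setup is assumed.

```java
// Hedged sketch (assumed client code, not the test itself): creating a table matching the
// descriptor printed in the CreateTableProcedure records above, pre-split at row key '1'
// so that two regions ('' .. '1' and '1' .. '') are created, as in the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateChecksumTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMaxVersions(1)                       // VERSIONS => '1'
          .setBloomFilterType(BloomType.ROW)       // BLOOMFILTER => 'ROW'
          .setBlocksize(64 * 1024)                 // BLOCKSIZE => '65536 B (64KB)'
          .build();
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportWithChecksum"))
          .setRegionReplication(1)                 // REGION_REPLICATION => '1'
          .setColumnFamily(cf)
          .build();
      byte[][] splitKeys = { Bytes.toBytes("1") }; // split point seen in the region names
      admin.createTable(desc, splitKeys);          // drives CreateTableProcedure (pid=213 above)
    }
  }
}
```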
2024-12-09T13:34:00,902 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T13:34:00,902 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751240902"}]},"ts":"1733751240902"} 2024-12-09T13:34:00,904 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-12-09T13:34:00,904 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {c48e4b8eb196=0} racks are {/default-rack=0} 2024-12-09T13:34:00,905 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T13:34:00,905 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T13:34:00,905 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T13:34:00,905 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T13:34:00,905 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T13:34:00,905 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T13:34:00,905 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T13:34:00,905 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T13:34:00,905 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T13:34:00,905 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T13:34:00,905 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=715af21e3a0a4ff556e6e5f0007c4552, ASSIGN}, {pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=9178341f170b995441dfe37d963f910d, ASSIGN}] 2024-12-09T13:34:00,906 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=715af21e3a0a4ff556e6e5f0007c4552, ASSIGN 2024-12-09T13:34:00,906 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=9178341f170b995441dfe37d963f910d, ASSIGN 2024-12-09T13:34:00,907 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=715af21e3a0a4ff556e6e5f0007c4552, ASSIGN; state=OFFLINE, location=c48e4b8eb196,44683,1733750968041; forceNewPlan=false, retain=false 2024-12-09T13:34:00,907 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=215, ppid=213, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=9178341f170b995441dfe37d963f910d, ASSIGN; state=OFFLINE, location=c48e4b8eb196,39211,1733750968235; forceNewPlan=false, retain=false 2024-12-09T13:34:00,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-09T13:34:01,058 INFO [c48e4b8eb196:45049 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-09T13:34:01,058 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=715af21e3a0a4ff556e6e5f0007c4552, regionState=OPENING, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:34:01,058 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=9178341f170b995441dfe37d963f910d, regionState=OPENING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:34:01,060 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=715af21e3a0a4ff556e6e5f0007c4552, ASSIGN because future has completed 2024-12-09T13:34:01,060 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=216, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure 715af21e3a0a4ff556e6e5f0007c4552, server=c48e4b8eb196,44683,1733750968041}] 2024-12-09T13:34:01,060 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=9178341f170b995441dfe37d963f910d, ASSIGN because future has completed 2024-12-09T13:34:01,061 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=217, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9178341f170b995441dfe37d963f910d, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:34:01,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-09T13:34:01,215 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552. 2024-12-09T13:34:01,215 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7752): Opening region: {ENCODED => 715af21e3a0a4ff556e6e5f0007c4552, NAME => 'testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T13:34:01,215 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d. 2024-12-09T13:34:01,215 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552. 
service=AccessControlService 2024-12-09T13:34:01,216 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7752): Opening region: {ENCODED => 9178341f170b995441dfe37d963f910d, NAME => 'testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T13:34:01,216 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T13:34:01,216 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 715af21e3a0a4ff556e6e5f0007c4552 2024-12-09T13:34:01,216 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d. service=AccessControlService 2024-12-09T13:34:01,216 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:34:01,216 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7794): checking encryption for 715af21e3a0a4ff556e6e5f0007c4552 2024-12-09T13:34:01,216 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7797): checking classloading for 715af21e3a0a4ff556e6e5f0007c4552 2024-12-09T13:34:01,216 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
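The region-open records above show each region registering the AccessControlService coprocessor endpoint and loading org.apache.hadoop.hbase.security.access.AccessController as a system coprocessor, which is what the secure export test cluster requires. As a hedged sketch only (the test harness's actual wiring is not shown here), the standard configuration keys that cause those coprocessors to be loaded can be set programmatically like this:

```java
// Hedged sketch: standard HBase security configuration keys that result in the
// AccessController coprocessor being loaded on master and region servers, as seen in
// the region-open records above. Building the Configuration this way is an assumption.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.access.AccessController;

public class SecureClusterConf {
  public static Configuration secureConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.security.authorization", true);
    conf.set("hbase.coprocessor.master.classes", AccessController.class.getName());
    conf.set("hbase.coprocessor.region.classes", AccessController.class.getName());
    conf.set("hbase.coprocessor.regionserver.classes", AccessController.class.getName());
    return conf;
  }
}
```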
2024-12-09T13:34:01,216 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 9178341f170b995441dfe37d963f910d 2024-12-09T13:34:01,217 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:34:01,217 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7794): checking encryption for 9178341f170b995441dfe37d963f910d 2024-12-09T13:34:01,217 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7797): checking classloading for 9178341f170b995441dfe37d963f910d 2024-12-09T13:34:01,218 INFO [StoreOpener-715af21e3a0a4ff556e6e5f0007c4552-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 715af21e3a0a4ff556e6e5f0007c4552 2024-12-09T13:34:01,218 INFO [StoreOpener-9178341f170b995441dfe37d963f910d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9178341f170b995441dfe37d963f910d 2024-12-09T13:34:01,219 INFO [StoreOpener-715af21e3a0a4ff556e6e5f0007c4552-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 715af21e3a0a4ff556e6e5f0007c4552 columnFamilyName cf 2024-12-09T13:34:01,219 DEBUG [StoreOpener-715af21e3a0a4ff556e6e5f0007c4552-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:34:01,219 INFO [StoreOpener-9178341f170b995441dfe37d963f910d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
9178341f170b995441dfe37d963f910d columnFamilyName cf 2024-12-09T13:34:01,219 DEBUG [StoreOpener-9178341f170b995441dfe37d963f910d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:34:01,220 INFO [StoreOpener-715af21e3a0a4ff556e6e5f0007c4552-1 {}] regionserver.HStore(327): Store=715af21e3a0a4ff556e6e5f0007c4552/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:34:01,220 INFO [StoreOpener-9178341f170b995441dfe37d963f910d-1 {}] regionserver.HStore(327): Store=9178341f170b995441dfe37d963f910d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:34:01,220 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1038): replaying wal for 715af21e3a0a4ff556e6e5f0007c4552 2024-12-09T13:34:01,220 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1038): replaying wal for 9178341f170b995441dfe37d963f910d 2024-12-09T13:34:01,221 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d 2024-12-09T13:34:01,221 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552 2024-12-09T13:34:01,221 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d 2024-12-09T13:34:01,221 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552 2024-12-09T13:34:01,221 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1048): stopping wal replay for 9178341f170b995441dfe37d963f910d 2024-12-09T13:34:01,221 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1048): stopping wal replay for 715af21e3a0a4ff556e6e5f0007c4552 2024-12-09T13:34:01,221 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1060): Cleaning up temporary data for 9178341f170b995441dfe37d963f910d 2024-12-09T13:34:01,221 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1060): Cleaning up temporary data for 715af21e3a0a4ff556e6e5f0007c4552 2024-12-09T13:34:01,222 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 
{event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1093): writing seq id for 9178341f170b995441dfe37d963f910d 2024-12-09T13:34:01,222 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1093): writing seq id for 715af21e3a0a4ff556e6e5f0007c4552 2024-12-09T13:34:01,224 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:34:01,224 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:34:01,225 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1114): Opened 9178341f170b995441dfe37d963f910d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68946671, jitterRate=0.02738545835018158}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:34:01,225 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1114): Opened 715af21e3a0a4ff556e6e5f0007c4552; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58919950, jitterRate=-0.12202432751655579}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:34:01,225 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9178341f170b995441dfe37d963f910d 2024-12-09T13:34:01,225 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 715af21e3a0a4ff556e6e5f0007c4552 2024-12-09T13:34:01,226 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1006): Region open journal for 9178341f170b995441dfe37d963f910d: Running coprocessor pre-open hook at 1733751241217Writing region info on filesystem at 1733751241217Initializing all the Stores at 1733751241218 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751241218Cleaning up temporary data from old regions at 1733751241221 (+3 ms)Running coprocessor post-open hooks at 1733751241225 (+4 ms)Region opened successfully at 1733751241226 (+1 ms) 2024-12-09T13:34:01,226 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1006): Region open journal for 715af21e3a0a4ff556e6e5f0007c4552: Running coprocessor pre-open hook at 1733751241216Writing region info on filesystem at 1733751241216Initializing all 
the Stores at 1733751241217 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751241217Cleaning up temporary data from old regions at 1733751241221 (+4 ms)Running coprocessor post-open hooks at 1733751241225 (+4 ms)Region opened successfully at 1733751241226 (+1 ms) 2024-12-09T13:34:01,226 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552., pid=216, masterSystemTime=1733751241211 2024-12-09T13:34:01,227 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d., pid=217, masterSystemTime=1733751241213 2024-12-09T13:34:01,228 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d. 2024-12-09T13:34:01,228 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d. 2024-12-09T13:34:01,228 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=9178341f170b995441dfe37d963f910d, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:34:01,228 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552. 2024-12-09T13:34:01,228 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552. 2024-12-09T13:34:01,229 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45049 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=c48e4b8eb196,39211,1733750968235, table=testtb-testExportWithChecksum, region=9178341f170b995441dfe37d963f910d. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-09T13:34:01,229 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=715af21e3a0a4ff556e6e5f0007c4552, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:34:01,230 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=217, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9178341f170b995441dfe37d963f910d, server=c48e4b8eb196,39211,1733750968235 because future has completed 2024-12-09T13:34:01,231 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=216, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure 715af21e3a0a4ff556e6e5f0007c4552, server=c48e4b8eb196,44683,1733750968041 because future has completed 2024-12-09T13:34:01,232 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=217, resume processing ppid=215 2024-12-09T13:34:01,232 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=217, ppid=215, state=SUCCESS, hasLock=false; OpenRegionProcedure 9178341f170b995441dfe37d963f910d, server=c48e4b8eb196,39211,1733750968235 in 170 msec 2024-12-09T13:34:01,233 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=215, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=9178341f170b995441dfe37d963f910d, ASSIGN in 327 msec 2024-12-09T13:34:01,233 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=216, resume processing ppid=214 2024-12-09T13:34:01,233 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=216, ppid=214, state=SUCCESS, hasLock=false; OpenRegionProcedure 715af21e3a0a4ff556e6e5f0007c4552, server=c48e4b8eb196,44683,1733750968041 in 172 msec 2024-12-09T13:34:01,234 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=214, resume processing ppid=213 2024-12-09T13:34:01,234 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=214, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=715af21e3a0a4ff556e6e5f0007c4552, ASSIGN in 328 msec 2024-12-09T13:34:01,235 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T13:34:01,235 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751241235"}]},"ts":"1733751241235"} 2024-12-09T13:34:01,236 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-12-09T13:34:01,237 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T13:34:01,238 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-12-09T13:34:01,240 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 
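The CREATE_TABLE_POST_OPERATION step above writes a 'jenkins: RWXCA' entry for the new table into hbase:acl, and the ZK permission watchers on each server pick it up in the records that follow. For comparison, an explicit grant through the public API that produces an equivalent ACL entry might look like the hedged sketch below; the user and table name come from the log, the rest is assumed.

```java
// Hedged sketch (assumed, not the test code): an explicit table-level grant equivalent to
// the 'jenkins: RWXCA' ACL entry written above, using the public AccessControlClient API.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantExample {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportWithChecksum"),
          "jenkins", null, null,                        // whole table: no family/qualifier
          Permission.Action.READ, Permission.Action.WRITE,
          Permission.Action.EXEC, Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}
```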
2024-12-09T13:34:01,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:34:01,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:34:01,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:34:01,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:34:01,304 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:34:01,304 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-09T13:34:01,304 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:34:01,304 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:34:01,304 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-09T13:34:01,304 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-09T13:34:01,305 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:34:01,305 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-09T13:34:01,308 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=213, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 429 msec 2024-12-09T13:34:01,440 DEBUG 
[HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-09T13:34:01,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-09T13:34:01,503 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-09T13:34:01,503 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithChecksum get assigned. Timeout = 60000ms 2024-12-09T13:34:01,503 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:34:01,506 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39211 {}] regionserver.StoreScanner(1138): Switch to stream read (scanned=32829 bytes) of info 2024-12-09T13:34:01,507 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithChecksum assigned to meta. Checking AM states. 2024-12-09T13:34:01,507 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:34:01,507 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithChecksum assigned. 2024-12-09T13:34:01,507 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-09T13:34:01,510 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-09T13:34:01,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733751241510 (current time:1733751241510). 
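Editor's aside (not part of the log): the last record above shows MasterRpcServices receiving a FLUSH-type snapshot request for emptySnaptb0-testExportWithChecksum with ttl=0. The test's own client call is not shown in the log; the following is only a sketch of the kind of call that produces such a request, assuming an already-open Admin handle.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotType;

// Hypothetical sketch: issues a FLUSH-type snapshot request like the one logged above,
// which the master then runs as a SnapshotProcedure.
class EmptySnapshotSketch {
  static void takeEmptySnapshot(Admin admin) throws Exception {
    admin.snapshot("emptySnaptb0-testExportWithChecksum",
        TableName.valueOf("testtb-testExportWithChecksum"),
        SnapshotType.FLUSH);
  }
}
```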
2024-12-09T13:34:01,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T13:34:01,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-09T13:34:01,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T13:34:01,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c6eea34, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:34:01,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:34:01,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:34:01,511 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:34:01,511 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:34:01,511 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:34:01,512 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@742fb41f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:34:01,512 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:34:01,512 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:34:01,512 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:34:01,513 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43622, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:34:01,513 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7be9de1c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:34:01,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:34:01,514 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:34:01,515 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:34:01,515 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36508, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:34:01,516 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049. 2024-12-09T13:34:01,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:34:01,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:34:01,517 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T13:34:01,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:34:01,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5809df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:34:01,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:34:01,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:34:01,518 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:34:01,518 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:34:01,518 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:34:01,518 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ef92b7c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:34:01,518 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:34:01,519 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:34:01,519 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:34:01,519 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43642, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:34:01,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33a93eed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:34:01,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:34:01,521 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:34:01,521 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:34:01,522 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36510, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:34:01,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:34:01,524 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049. 2024-12-09T13:34:01,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:34:01,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:34:01,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:34:01,524 INFO 
[Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:34:01,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-09T13:34:01,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T13:34:01,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-09T13:34:01,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-12-09T13:34:01,527 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T13:34:01,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-09T13:34:01,528 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T13:34:01,530 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T13:34:01,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742282_1458 (size=161) 2024-12-09T13:34:01,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742282_1458 (size=161) 2024-12-09T13:34:01,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742282_1458 (size=161) 2024-12-09T13:34:01,536 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T13:34:01,536 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 715af21e3a0a4ff556e6e5f0007c4552}, {pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
9178341f170b995441dfe37d963f910d}] 2024-12-09T13:34:01,537 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 715af21e3a0a4ff556e6e5f0007c4552 2024-12-09T13:34:01,537 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9178341f170b995441dfe37d963f910d 2024-12-09T13:34:01,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-09T13:34:01,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44683 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=219 2024-12-09T13:34:01,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39211 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=220 2024-12-09T13:34:01,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552. 2024-12-09T13:34:01,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d. 2024-12-09T13:34:01,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.HRegion(2603): Flush status journal for 715af21e3a0a4ff556e6e5f0007c4552: 2024-12-09T13:34:01,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.HRegion(2603): Flush status journal for 9178341f170b995441dfe37d963f910d: 2024-12-09T13:34:01,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552. for emptySnaptb0-testExportWithChecksum completed. 2024-12-09T13:34:01,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d. for emptySnaptb0-testExportWithChecksum completed. 2024-12-09T13:34:01,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-09T13:34:01,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d.' 
region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-09T13:34:01,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:34:01,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:34:01,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T13:34:01,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T13:34:01,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742283_1459 (size=68) 2024-12-09T13:34:01,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742283_1459 (size=68) 2024-12-09T13:34:01,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742283_1459 (size=68) 2024-12-09T13:34:01,709 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552. 2024-12-09T13:34:01,709 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=219 2024-12-09T13:34:01,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=219 2024-12-09T13:34:01,709 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 715af21e3a0a4ff556e6e5f0007c4552 2024-12-09T13:34:01,709 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 715af21e3a0a4ff556e6e5f0007c4552 2024-12-09T13:34:01,711 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=219, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 715af21e3a0a4ff556e6e5f0007c4552 in 174 msec 2024-12-09T13:34:01,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742284_1460 (size=68) 2024-12-09T13:34:01,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742284_1460 (size=68) 2024-12-09T13:34:01,713 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d. 
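Editor's aside (not part of the log): a few records below, SnapshotProcedure pid=218 consolidates the manifest, moves the snapshot out of .hbase-snapshot/.tmp, and finishes with SUCCESS. Once that happens, a client could confirm the snapshot exists with something like the sketch below; this is illustrative only and not part of the test run.

```java
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;

// Hypothetical check: list completed snapshots whose names end with the test suffix.
class SnapshotListingSketch {
  static void printChecksumTestSnapshots(Admin admin) throws Exception {
    for (SnapshotDescription sd : admin.listSnapshots(Pattern.compile(".*-testExportWithChecksum"))) {
      System.out.println(sd.getName() + " -> " + sd.getTableName());
    }
  }
}
```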
2024-12-09T13:34:01,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742284_1460 (size=68) 2024-12-09T13:34:01,714 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=220 2024-12-09T13:34:01,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=220 2024-12-09T13:34:01,714 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 9178341f170b995441dfe37d963f910d 2024-12-09T13:34:01,714 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9178341f170b995441dfe37d963f910d 2024-12-09T13:34:01,719 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=220, resume processing ppid=218 2024-12-09T13:34:01,719 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T13:34:01,719 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=220, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 9178341f170b995441dfe37d963f910d in 179 msec 2024-12-09T13:34:01,720 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T13:34:01,721 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T13:34:01,721 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-12-09T13:34:01,722 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-12-09T13:34:01,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742285_1461 (size=543) 2024-12-09T13:34:01,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742285_1461 (size=543) 2024-12-09T13:34:01,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742285_1461 (size=543) 2024-12-09T13:34:01,736 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, 
snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T13:34:01,739 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T13:34:01,740 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-12-09T13:34:01,741 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T13:34:01,741 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-12-09T13:34:01,742 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=218, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 216 msec 2024-12-09T13:34:01,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-09T13:34:01,843 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-09T13:34:01,847 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='04486e46fd44085209360189be51fdd34', locateType=CURRENT is [region=testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552., hostname=c48e4b8eb196,44683,1733750968041, seqNum=2] 2024-12-09T13:34:01,848 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='19b384d22e6bef9a9a419c23a82786ebd', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:34:01,851 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='2c06b576cc4f927cd0457f5ba0d6061dc', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:34:01,852 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='340e3072a53a8ebd4142a4f04b0f53a48', locateType=CURRENT is 
[region=testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:34:01,853 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='5862020a463d0fac53d1dbe6cc7d6127e', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:34:01,854 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='4da31b4fcf0671ca2794a816431b3d290', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:34:01,856 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44683 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T13:34:01,857 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39211 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T13:34:01,858 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-09T13:34:01,862 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-12-09T13:34:01,862 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552. 2024-12-09T13:34:01,862 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:34:01,864 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-09T13:34:01,871 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-09T13:34:01,878 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-09T13:34:01,881 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-09T13:34:01,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733751241881 (current time:1733751241881). 
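Editor's aside (not part of the log): the two HRegion(8528) records above report that the test data is written with the WAL disabled, so the rows live only in the memstore until the later flush. The test's load helper is not shown here; the sketch below is an assumption about how such a write could be produced, by setting Durability.SKIP_WAL on each Put against family 'cf'.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical sketch: a single write that bypasses the WAL, which is what triggers the
// "writing data to region ... with WAL disabled" message seen above.
class SkipWalWriteSketch {
  static void putWithoutWal(Connection conn, byte[] row) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("testtb-testExportWithChecksum"))) {
      Put put = new Put(row)
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL); // memstore only; data is lost if the server crashes
      table.put(put);
    }
  }
}
```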
2024-12-09T13:34:01,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T13:34:01,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-09T13:34:01,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T13:34:01,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2610b580, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:34:01,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:34:01,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:34:01,883 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:34:01,883 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:34:01,883 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:34:01,883 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@150a4b89, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:34:01,883 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:34:01,883 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:34:01,884 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:34:01,884 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43656, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:34:01,885 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f1b1cd2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:34:01,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:34:01,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:34:01,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:34:01,888 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36518, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:34:01,889 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049. 2024-12-09T13:34:01,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:34:01,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:34:01,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:34:01,890 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T13:34:01,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45ce1cd7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:34:01,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:34:01,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:34:01,891 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:34:01,891 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:34:01,892 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:34:01,892 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2400248d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:34:01,892 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:34:01,892 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:34:01,892 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:34:01,893 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43672, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:34:01,893 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3777f98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:34:01,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:34:01,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1] 2024-12-09T13:34:01,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:34:01,896 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36526, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-09T13:34:01,898 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:34:01,899 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049. 2024-12-09T13:34:01,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:34:01,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:34:01,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:34:01,899 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T13:34:01,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-09T13:34:01,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T13:34:01,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-09T13:34:01,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-12-09T13:34:01,902 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T13:34:01,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-09T13:34:01,903 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T13:34:01,905 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T13:34:01,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742286_1462 (size=156) 2024-12-09T13:34:01,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742286_1462 (size=156) 2024-12-09T13:34:01,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742286_1462 (size=156) 2024-12-09T13:34:01,923 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T13:34:01,923 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 715af21e3a0a4ff556e6e5f0007c4552}, {pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9178341f170b995441dfe37d963f910d}] 2024-12-09T13:34:01,924 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, 
hasLock=false; SnapshotRegionProcedure 715af21e3a0a4ff556e6e5f0007c4552 2024-12-09T13:34:01,925 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9178341f170b995441dfe37d963f910d 2024-12-09T13:34:02,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-09T13:34:02,077 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44683 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=222 2024-12-09T13:34:02,077 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39211 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=223 2024-12-09T13:34:02,077 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552. 2024-12-09T13:34:02,078 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2902): Flushing 715af21e3a0a4ff556e6e5f0007c4552 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-09T13:34:02,078 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d. 2024-12-09T13:34:02,078 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2902): Flushing 9178341f170b995441dfe37d963f910d 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-09T13:34:02,100 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d/.tmp/cf/cd3fee1f23dc46adb9da4f03cedde158 is 71, key is 1a74443c7257971d28a714ac4f1c287f/cf:q/1733751241857/Put/seqid=0 2024-12-09T13:34:02,105 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552/.tmp/cf/cb929f6039494a29b4bfaa09df104f6b is 71, key is 03c1e92f0aabdccdc559d2e3249130ee/cf:q/1733751241855/Put/seqid=0 2024-12-09T13:34:02,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742287_1463 (size=8326) 2024-12-09T13:34:02,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742287_1463 (size=8326) 2024-12-09T13:34:02,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742287_1463 (size=8326) 2024-12-09T13:34:02,108 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d/.tmp/cf/cd3fee1f23dc46adb9da4f03cedde158 2024-12-09T13:34:02,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d/.tmp/cf/cd3fee1f23dc46adb9da4f03cedde158 as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d/cf/cd3fee1f23dc46adb9da4f03cedde158 2024-12-09T13:34:02,124 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d/cf/cd3fee1f23dc46adb9da4f03cedde158, entries=47, sequenceid=6, filesize=8.1 K 2024-12-09T13:34:02,127 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 9178341f170b995441dfe37d963f910d in 49ms, sequenceid=6, compaction requested=false 2024-12-09T13:34:02,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2603): Flush status journal for 9178341f170b995441dfe37d963f910d: 2024-12-09T13:34:02,127 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d. for snaptb0-testExportWithChecksum completed. 2024-12-09T13:34:02,128 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-09T13:34:02,128 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:34:02,128 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d/cf/cd3fee1f23dc46adb9da4f03cedde158] hfiles 2024-12-09T13:34:02,128 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d/cf/cd3fee1f23dc46adb9da4f03cedde158 for snapshot=snaptb0-testExportWithChecksum 2024-12-09T13:34:02,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742288_1464 (size=5286) 2024-12-09T13:34:02,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742288_1464 (size=5286) 2024-12-09T13:34:02,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742288_1464 (size=5286) 2024-12-09T13:34:02,132 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552/.tmp/cf/cb929f6039494a29b4bfaa09df104f6b 2024-12-09T13:34:02,137 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552/.tmp/cf/cb929f6039494a29b4bfaa09df104f6b as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552/cf/cb929f6039494a29b4bfaa09df104f6b 2024-12-09T13:34:02,143 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552/cf/cb929f6039494a29b4bfaa09df104f6b, entries=3, sequenceid=6, filesize=5.2 K 2024-12-09T13:34:02,144 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 715af21e3a0a4ff556e6e5f0007c4552 in 67ms, sequenceid=6, compaction requested=false 2024-12-09T13:34:02,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2603): Flush status journal for 715af21e3a0a4ff556e6e5f0007c4552: 
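The records above show each SnapshotRegionProcedure flushing its region's single column family ('cf') and then adding manifest references to the flushed hfiles, which is how a FLUSH-type snapshot captures file references rather than copying data. Below is a minimal, hypothetical client-side sketch of requesting such a snapshot through the public Admin API; only the table and snapshot names are taken from the log, while the class name and connection setup are illustrative assumptions, not part of this test.

```java
// Hypothetical sketch (not from the test source): taking the same kind of
// FLUSH snapshot via the public Admin API. Only the table and snapshot
// names come from the log above; everything else is illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Drives a master-side SnapshotProcedure like pid=221 above: each
      // online region is flushed, then references to the flushed hfiles
      // are recorded in the snapshot manifest.
      admin.snapshot("snaptb0-testExportWithChecksum",
          TableName.valueOf("testtb-testExportWithChecksum"),
          SnapshotType.FLUSH);
    }
  }
}
```

The call blocks until the procedure reaches SNAPSHOT_COMPLETE_SNAPSHOT, which matches the client polling ("Checking to see if procedure is done pid=221") interleaved with the procedure states in this log.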
2024-12-09T13:34:02,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552. for snaptb0-testExportWithChecksum completed. 2024-12-09T13:34:02,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-09T13:34:02,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:34:02,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552/cf/cb929f6039494a29b4bfaa09df104f6b] hfiles 2024-12-09T13:34:02,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552/cf/cb929f6039494a29b4bfaa09df104f6b for snapshot=snaptb0-testExportWithChecksum 2024-12-09T13:34:02,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742289_1465 (size=107) 2024-12-09T13:34:02,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742289_1465 (size=107) 2024-12-09T13:34:02,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742289_1465 (size=107) 2024-12-09T13:34:02,155 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d. 
2024-12-09T13:34:02,155 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=223 2024-12-09T13:34:02,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=223 2024-12-09T13:34:02,155 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 9178341f170b995441dfe37d963f910d 2024-12-09T13:34:02,156 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9178341f170b995441dfe37d963f910d 2024-12-09T13:34:02,158 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=223, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 9178341f170b995441dfe37d963f910d in 233 msec 2024-12-09T13:34:02,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742290_1466 (size=107) 2024-12-09T13:34:02,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742290_1466 (size=107) 2024-12-09T13:34:02,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742290_1466 (size=107) 2024-12-09T13:34:02,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552. 
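Once the parent SnapshotProcedure finishes (a few records below), the snapshot becomes visible to clients and can be inspected or removed. The following is a hedged sketch of that follow-up, assuming the same snapshot name as the log; the class name and printed fields are illustrative and not taken from this test.

```java
// Hypothetical follow-up sketch: list completed snapshots and clean up.
// Snapshot name mirrors the log; setup and class name are illustrative.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListAndCleanupSnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Confirm the snapshot is registered under .hbase-snapshot.
      for (SnapshotDescription sd : admin.listSnapshots()) {
        System.out.println(sd.getName() + " on " + sd.getTableName());
      }
      // Remove it when it is no longer needed.
      admin.deleteSnapshot("snaptb0-testExportWithChecksum");
    }
  }
}
```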
2024-12-09T13:34:02,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=222 2024-12-09T13:34:02,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=222 2024-12-09T13:34:02,171 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 715af21e3a0a4ff556e6e5f0007c4552 2024-12-09T13:34:02,171 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 715af21e3a0a4ff556e6e5f0007c4552 2024-12-09T13:34:02,173 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=222, resume processing ppid=221 2024-12-09T13:34:02,173 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=222, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 715af21e3a0a4ff556e6e5f0007c4552 in 248 msec 2024-12-09T13:34:02,173 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T13:34:02,174 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T13:34:02,175 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T13:34:02,175 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-12-09T13:34:02,175 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-09T13:34:02,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742291_1467 (size=621) 2024-12-09T13:34:02,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742291_1467 (size=621) 2024-12-09T13:34:02,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742291_1467 (size=621) 2024-12-09T13:34:02,201 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T13:34:02,209 INFO [PEWorker-2 {}] 
procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T13:34:02,209 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-09T13:34:02,211 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T13:34:02,211 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-12-09T13:34:02,212 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=221, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 311 msec 2024-12-09T13:34:02,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-09T13:34:02,223 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-09T13:34:02,224 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751242224 2024-12-09T13:34:02,224 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751242224, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751242224, srcFsUri=hdfs://localhost:45819, srcDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:34:02,265 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:45819, inputRoot=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:34:02,265 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@34003054, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751242224, skipTmp=false, 
initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751242224/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-09T13:34:02,267 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-09T13:34:02,273 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751242224/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-09T13:34:02,315 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:34:02,316 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:34:02,316 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:34:02,987 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T13:34:03,234 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop-5198114288147119383.jar 2024-12-09T13:34:03,234 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:34:03,234 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:34:03,289 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop-14320644386987309390.jar 2024-12-09T13:34:03,289 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:34:03,290 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:34:03,290 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:34:03,290 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:34:03,290 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:34:03,290 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:34:03,290 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-09T13:34:03,291 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-09T13:34:03,291 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-09T13:34:03,291 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-09T13:34:03,291 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-09T13:34:03,291 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-09T13:34:03,292 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-09T13:34:03,292 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-09T13:34:03,292 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-09T13:34:03,292 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-09T13:34:03,292 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-09T13:34:03,293 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:34:03,293 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:34:03,293 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:34:03,293 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:34:03,293 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:34:03,293 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:34:03,293 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:34:03,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742292_1468 (size=5175431) 2024-12-09T13:34:03,355 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742292_1468 (size=5175431) 2024-12-09T13:34:03,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742292_1468 (size=5175431) 2024-12-09T13:34:03,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742293_1469 (size=1877034) 2024-12-09T13:34:03,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742293_1469 (size=1877034) 2024-12-09T13:34:03,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742293_1469 (size=1877034) 2024-12-09T13:34:03,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742294_1470 (size=20406) 2024-12-09T13:34:03,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742294_1470 (size=20406) 2024-12-09T13:34:03,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742294_1470 (size=20406) 2024-12-09T13:34:03,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742295_1471 (size=24096) 2024-12-09T13:34:03,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742295_1471 (size=24096) 2024-12-09T13:34:03,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742295_1471 (size=24096) 2024-12-09T13:34:03,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742296_1472 (size=503880) 2024-12-09T13:34:03,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742296_1472 (size=503880) 2024-12-09T13:34:03,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742296_1472 (size=503880) 2024-12-09T13:34:03,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742297_1473 (size=4695811) 2024-12-09T13:34:03,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742297_1473 (size=4695811) 2024-12-09T13:34:03,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742297_1473 (size=4695811) 2024-12-09T13:34:03,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742298_1474 (size=111872) 2024-12-09T13:34:03,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742298_1474 (size=111872) 2024-12-09T13:34:03,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742298_1474 (size=111872) 2024-12-09T13:34:03,834 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742299_1475 (size=1597213) 2024-12-09T13:34:03,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742299_1475 (size=1597213) 2024-12-09T13:34:03,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742299_1475 (size=1597213) 2024-12-09T13:34:03,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742300_1476 (size=4188619) 2024-12-09T13:34:03,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742300_1476 (size=4188619) 2024-12-09T13:34:03,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742300_1476 (size=4188619) 2024-12-09T13:34:03,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742301_1477 (size=127628) 2024-12-09T13:34:03,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742301_1477 (size=127628) 2024-12-09T13:34:03,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742301_1477 (size=127628) 2024-12-09T13:34:03,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742302_1478 (size=29229) 2024-12-09T13:34:03,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742302_1478 (size=29229) 2024-12-09T13:34:03,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742302_1478 (size=29229) 2024-12-09T13:34:03,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742303_1479 (size=45609) 2024-12-09T13:34:03,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742303_1479 (size=45609) 2024-12-09T13:34:03,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742303_1479 (size=45609) 2024-12-09T13:34:03,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742304_1480 (size=8360360) 2024-12-09T13:34:03,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742304_1480 (size=8360360) 2024-12-09T13:34:03,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742304_1480 (size=8360360) 2024-12-09T13:34:03,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742305_1481 (size=1323991) 2024-12-09T13:34:03,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742305_1481 (size=1323991) 2024-12-09T13:34:03,926 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742305_1481 (size=1323991) 2024-12-09T13:34:03,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742306_1482 (size=232957) 2024-12-09T13:34:03,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742306_1482 (size=232957) 2024-12-09T13:34:03,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742306_1482 (size=232957) 2024-12-09T13:34:03,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742307_1483 (size=217634) 2024-12-09T13:34:03,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742307_1483 (size=217634) 2024-12-09T13:34:03,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742307_1483 (size=217634) 2024-12-09T13:34:03,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742308_1484 (size=903930) 2024-12-09T13:34:03,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742308_1484 (size=903930) 2024-12-09T13:34:03,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742308_1484 (size=903930) 2024-12-09T13:34:03,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742309_1485 (size=77835) 2024-12-09T13:34:03,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742309_1485 (size=77835) 2024-12-09T13:34:03,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742309_1485 (size=77835) 2024-12-09T13:34:03,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742310_1486 (size=443172) 2024-12-09T13:34:03,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742310_1486 (size=443172) 2024-12-09T13:34:03,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742310_1486 (size=443172) 2024-12-09T13:34:04,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742311_1487 (size=1832290) 2024-12-09T13:34:04,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742311_1487 (size=1832290) 2024-12-09T13:34:04,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742311_1487 (size=1832290) 2024-12-09T13:34:04,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742312_1488 (size=30949) 2024-12-09T13:34:04,025 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742312_1488 (size=30949) 2024-12-09T13:34:04,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742312_1488 (size=30949) 2024-12-09T13:34:04,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742313_1489 (size=136454) 2024-12-09T13:34:04,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742313_1489 (size=136454) 2024-12-09T13:34:04,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742313_1489 (size=136454) 2024-12-09T13:34:04,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742314_1490 (size=322274) 2024-12-09T13:34:04,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742314_1490 (size=322274) 2024-12-09T13:34:04,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742314_1490 (size=322274) 2024-12-09T13:34:04,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742315_1491 (size=131440) 2024-12-09T13:34:04,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742315_1491 (size=131440) 2024-12-09T13:34:04,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742315_1491 (size=131440) 2024-12-09T13:34:04,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742316_1492 (size=6425017) 2024-12-09T13:34:04,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742316_1492 (size=6425017) 2024-12-09T13:34:04,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742316_1492 (size=6425017) 2024-12-09T13:34:04,098 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
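The records that follow ("Local export destination path", "tgtFsUri=file:///", the TableMapReduceUtil jar resolution, and "Loading Snapshot ... hfile list") show ExportSnapshot preparing a MapReduce job that copies the snapshot from HDFS to a local file:/// destination. A hedged sketch of launching such an export programmatically is given below: ExportSnapshot is run here as a Hadoop Tool, the -snapshot and -copy-to options are its standard arguments, and the class name and destination URI are placeholders, not values from this log.

```java
// Hypothetical sketch of launching a snapshot export like the one above.
// The destination URI is a placeholder; -snapshot/-copy-to are the tool's
// standard options.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExport {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Submits the MapReduce job that copies the snapshot manifest and the
    // referenced hfiles to the target filesystem.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "file:///tmp/local-export"   // placeholder destination
    });
    System.exit(rc);
  }
}
```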
2024-12-09T13:34:04,100 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-09T13:34:04,102 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-09T13:34:04,103 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-09T13:34:04,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742317_1493 (size=441) 2024-12-09T13:34:04,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742317_1493 (size=441) 2024-12-09T13:34:04,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742317_1493 (size=441) 2024-12-09T13:34:04,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742318_1494 (size=21) 2024-12-09T13:34:04,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742318_1494 (size=21) 2024-12-09T13:34:04,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742318_1494 (size=21) 2024-12-09T13:34:04,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742319_1495 (size=304150) 2024-12-09T13:34:04,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742319_1495 (size=304150) 2024-12-09T13:34:04,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742319_1495 (size=304150) 2024-12-09T13:34:04,604 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T13:34:04,604 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-09T13:34:04,609 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0008_000001 (auth:SIMPLE) from 127.0.0.1:50740 2024-12-09T13:34:04,617 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_3/usercache/jenkins/appcache/application_1733750974572_0008/container_1733750974572_0008_01_000001/launch_container.sh] 2024-12-09T13:34:04,618 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_3/usercache/jenkins/appcache/application_1733750974572_0008/container_1733750974572_0008_01_000001/container_tokens] 2024-12-09T13:34:04,618 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_3/usercache/jenkins/appcache/application_1733750974572_0008/container_1733750974572_0008_01_000001/sysfs] 2024-12-09T13:34:05,414 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0009_000001 (auth:SIMPLE) from 127.0.0.1:56990 2024-12-09T13:34:07,484 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-09T13:34:07,484 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-12-09T13:34:07,485 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-09T13:34:12,483 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0009_000001 (auth:SIMPLE) from 127.0.0.1:60412 2024-12-09T13:34:12,987 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T13:34:13,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742320_1496 (size=349848) 2024-12-09T13:34:13,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742320_1496 (size=349848) 2024-12-09T13:34:13,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742320_1496 (size=349848) 2024-12-09T13:34:14,760 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0009_000001 (auth:SIMPLE) from 127.0.0.1:50670 2024-12-09T13:34:14,760 INFO [Socket Reader #1 for port 0 {}] 
ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0009_000001 (auth:SIMPLE) from 127.0.0.1:40324 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552/cf/cb929f6039494a29b4bfaa09df104f6b and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751242224/archive/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552/cf/cb929f6039494a29b4bfaa09df104f6b. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-09T13:34:20,618 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0009_000001 (auth:SIMPLE) from 127.0.0.1:40332 2024-12-09T13:34:21,830 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_2/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000002/launch_container.sh] 2024-12-09T13:34:21,830 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_2/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000002/container_tokens] 2024-12-09T13:34:21,830 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_2/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000002/sysfs] Error: 
java.io.IOException: Checksum mismatch between hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d/cf/cd3fee1f23dc46adb9da4f03cedde158 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751242224/archive/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d/cf/cd3fee1f23dc46adb9da4f03cedde158. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-09T13:34:22,732 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region f0043ab8119e670f2bd7cf64ad44aea8, had cached 0 bytes from a total of 8188 2024-12-09T13:34:22,733 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region b965e8c57383b84f23fb09c5323ac145, had cached 0 bytes from a total of 5422 2024-12-09T13:34:23,651 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0009_000001 (auth:SIMPLE) from 127.0.0.1:39972 2024-12-09T13:34:24,728 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_3/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000003/launch_container.sh] 2024-12-09T13:34:24,728 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_3/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000003/container_tokens] 2024-12-09T13:34:24,728 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_3/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000003/sysfs] 2024-12-09T13:34:26,022 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T13:34:27,059 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_1/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000004/launch_container.sh] 2024-12-09T13:34:27,059 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_1/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000004/container_tokens] 2024-12-09T13:34:27,059 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_1/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000004/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552/cf/cb929f6039494a29b4bfaa09df104f6b and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751242224/archive/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552/cf/cb929f6039494a29b4bfaa09df104f6b. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-09T13:34:28,133 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=920.55 KB, freeSize=879.10 MB, max=880 MB, blockCount=5, accesses=7, hits=2, hitRatio=28.57%, , cachingAccesses=7, cachingHits=2, cachingHitsRatio=28.57%, evictions=29, evicted=0, evictedPerRun=0.0 2024-12-09T13:34:28,197 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-09T13:34:28,262 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-09T13:34:28,393 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-09T13:34:28,393 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=2, created chunk count=10, reused chunk count=22, reuseRatio=68.75% 2024-12-09T13:34:28,679 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0009_000001 (auth:SIMPLE) from 127.0.0.1:39980 2024-12-09T13:34:29,743 INFO [regionserver/c48e4b8eb196:0.Chore.2 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-09T13:34:29,743 INFO [regionserver/c48e4b8eb196:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-09T13:34:29,744 INFO [regionserver/c48e4b8eb196:0.Chore.2 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-09T13:34:30,319 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_1/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000005/launch_container.sh] 2024-12-09T13:34:30,319 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_1/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000005/container_tokens] 2024-12-09T13:34:30,319 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_1/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000005/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d/cf/cd3fee1f23dc46adb9da4f03cedde158 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751242224/archive/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d/cf/cd3fee1f23dc46adb9da4f03cedde158. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-09T13:34:30,949 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region b965e8c57383b84f23fb09c5323ac145 changed from -1.0 to 0.0, refreshing cache 2024-12-09T13:34:30,949 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region f0043ab8119e670f2bd7cf64ad44aea8 changed from -1.0 to 0.0, refreshing cache 2024-12-09T13:34:30,949 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 9178341f170b995441dfe37d963f910d changed from -1.0 to 0.0, refreshing cache 2024-12-09T13:34:30,949 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 715af21e3a0a4ff556e6e5f0007c4552 changed from -1.0 to 0.0, refreshing cache 2024-12-09T13:34:30,949 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(138): Balancing RSGroup=default 2024-12-09T13:34:30,950 INFO [master/c48e4b8eb196:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(151): Start Generate Balance plan for group: default 2024-12-09T13:34:30,950 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] balancer.BaseLoadBalancer(619): Start Generate Balance plan for cluster. 
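The ExportSnapshot failures above are all the same condition: the source (hdfs://) and destination (file:/) filesystems compute incompatible checksums, so per-file verification fails even though the copy itself may be intact. The error text names two ways out, and the sketch below shows how either could be wired up through the standard ExportSnapshot Tool entry point. It is a minimal, hypothetical example: the snapshot name and destination URI are placeholders, not values taken from this run.

// Illustrative sketch only; snapshot name and destination are hypothetical.
// Assumes ExportSnapshot is driven through the usual Hadoop Tool/ToolRunner path.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportWithCompositeCrc {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // File-level checksum comparison that works across filesystem types,
    // equivalent to passing -Ddfs.checksum.combine.mode=COMPOSITE_CRC on the
    // command line, as suggested by the exception message above.
    conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");

    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "my-snapshot",            // hypothetical snapshot name
        "-copy-to", "file:///tmp/export-dir"   // hypothetical destination
        // Alternatively, add "-no-checksum-verify" to skip verification
        // entirely, at the risk of masking corruption during the copy.
    });
    System.exit(rc);
  }
}

Passing -no-checksum-verify instead of setting COMPOSITE_CRC trades verification for convenience and, as the NOTE in the message warns, can mask data corruption during transfer; the table backup scenario exposes the same skip through its -i option.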
2024-12-09T13:34:30,951 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] balancer.BalancerClusterState(204): Hosts are {c48e4b8eb196=0} racks are {/default-rack=0} 2024-12-09T13:34:30,951 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] balancer.BalancerClusterState(303): server 0 has 1 regions 2024-12-09T13:34:30,951 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] balancer.BalancerClusterState(303): server 1 has 4 regions 2024-12-09T13:34:30,951 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] balancer.BalancerClusterState(303): server 2 has 1 regions 2024-12-09T13:34:30,951 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T13:34:30,951 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T13:34:30,951 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T13:34:30,951 INFO [master/c48e4b8eb196:0.Chore.1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T13:34:30,951 INFO [master/c48e4b8eb196:0.Chore.1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T13:34:30,951 INFO [master/c48e4b8eb196:0.Chore.1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T13:34:30,951 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] balancer.BalancerClusterState(326): Number of tables=4, number of hosts=1, number of racks=1 2024-12-09T13:34:30,955 INFO [master/c48e4b8eb196:0.Chore.1 {}] balancer.StochasticLoadBalancer(376): Running balancer because cluster has sloppy server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-12-09T13:34:30,955 INFO [master/c48e4b8eb196:0.Chore.1 {}] balancer.StochasticLoadBalancer(515): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.42833607907743, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=14400 2024-12-09T13:34:31,243 INFO [regionserver/c48e4b8eb196:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of 
hbase:meta,,1.1588230740 because 1588230740/info has an old edit so flush to free WALs after random delay 137453 ms 2024-12-09T13:34:31,244 INFO [master/c48e4b8eb196:0.Chore.1 {}] balancer.StochasticLoadBalancer(562): Finished computing new moving plan. Computation took 292 ms to try 14400 different iterations. Found a solution that moves 2 regions; Going from a computed imbalance of 0.42833607907743 to a new imbalance of 0.017783957837847097. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.3333333333333333, need balance); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.8404338767081295, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8518719381398412, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-12-09T13:34:31,249 INFO [master/c48e4b8eb196:0.Chore.1 {}] master.HMaster(2167): Balancer plans size is 2, the balance interval is 150000 ms, and the max number regions in transition is 6 2024-12-09T13:34:31,250 INFO [master/c48e4b8eb196:0.Chore.1 {}] master.HMaster(2172): balance hri=1588230740, source=c48e4b8eb196,39211,1733750968235, destination=c48e4b8eb196,36493,1733750968163 2024-12-09T13:34:31,251 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] procedure2.ProcedureExecutor(1139): Stored pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE 2024-12-09T13:34:31,252 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE 2024-12-09T13:34:31,253 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=1588230740, regionState=CLOSING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:34:31,254 INFO [PEWorker-1 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c48e4b8eb196,39211,1733750968235, state=CLOSING 2024-12-09T13:34:31,278 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45049 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=c48e4b8eb196,39211,1733750968235, table=hbase:meta, region=1588230740. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-09T13:34:31,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T13:34:31,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T13:34:31,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T13:34:31,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T13:34:31,310 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T13:34:31,310 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T13:34:31,310 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T13:34:31,310 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T13:34:31,310 DEBUG [PEWorker-1 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE 2024-12-09T13:34:31,310 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:34:31,310 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=225, ppid=224, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1588230740, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:34:31,485 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] handler.UnassignRegionHandler(122): Close 1588230740 2024-12-09T13:34:31,485 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:34:31,485 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T13:34:31,485 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T13:34:31,485 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T13:34:31,485 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1843): Acquired close lock on 
hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T13:34:31,485 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T13:34:31,486 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=76.94 KB heapSize=121.80 KB 2024-12-09T13:34:31,525 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/.tmp/info/05be33031c1e4fdc8eed7c28d183b049 is 181, key is testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d./info:regioninfo/1733751241228/Put/seqid=0 2024-12-09T13:34:31,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742322_1498 (size=17550) 2024-12-09T13:34:31,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742322_1498 (size=17550) 2024-12-09T13:34:31,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742322_1498 (size=17550) 2024-12-09T13:34:31,595 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65.78 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/.tmp/info/05be33031c1e4fdc8eed7c28d183b049 2024-12-09T13:34:31,627 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/.tmp/ns/50e7adc7ab7c4393bf2b5605c085f192 is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197137.941fdb94a7ed0c6a947cad68d03d6584./ns:/1733751215618/DeleteFamily/seqid=0 2024-12-09T13:34:31,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742323_1499 (size=7924) 2024-12-09T13:34:31,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742323_1499 (size=7924) 2024-12-09T13:34:31,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742323_1499 (size=7924) 2024-12-09T13:34:31,658 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.37 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/.tmp/ns/50e7adc7ab7c4393bf2b5605c085f192 2024-12-09T13:34:31,676 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0009_000001 (auth:SIMPLE) from 127.0.0.1:37594 2024-12-09T13:34:31,700 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] hfile.HFileWriterImpl(814): Len of the biggest 
cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/.tmp/rep_barrier/9c065cd7ed3c4d119cde12f84e53795c is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197137.941fdb94a7ed0c6a947cad68d03d6584./rep_barrier:/1733751215618/DeleteFamily/seqid=0 2024-12-09T13:34:31,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742324_1500 (size=8195) 2024-12-09T13:34:31,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742324_1500 (size=8195) 2024-12-09T13:34:31,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742324_1500 (size=8195) 2024-12-09T13:34:31,770 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.49 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/.tmp/rep_barrier/9c065cd7ed3c4d119cde12f84e53795c 2024-12-09T13:34:31,833 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/.tmp/table/62f40ba63c5b40a3b6de339632b67a01 is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733751197137.941fdb94a7ed0c6a947cad68d03d6584./table:/1733751215618/DeleteFamily/seqid=0 2024-12-09T13:34:31,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742325_1501 (size=9051) 2024-12-09T13:34:31,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742325_1501 (size=9051) 2024-12-09T13:34:31,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742325_1501 (size=9051) 2024-12-09T13:34:31,873 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/.tmp/table/62f40ba63c5b40a3b6de339632b67a01 2024-12-09T13:34:31,889 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/.tmp/info/05be33031c1e4fdc8eed7c28d183b049 as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/info/05be33031c1e4fdc8eed7c28d183b049 2024-12-09T13:34:31,896 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/info/05be33031c1e4fdc8eed7c28d183b049, entries=96, sequenceid=211, filesize=17.1 K 2024-12-09T13:34:31,897 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 
{event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/.tmp/ns/50e7adc7ab7c4393bf2b5605c085f192 as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/ns/50e7adc7ab7c4393bf2b5605c085f192 2024-12-09T13:34:31,902 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/ns/50e7adc7ab7c4393bf2b5605c085f192, entries=24, sequenceid=211, filesize=7.7 K 2024-12-09T13:34:31,904 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/.tmp/rep_barrier/9c065cd7ed3c4d119cde12f84e53795c as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/rep_barrier/9c065cd7ed3c4d119cde12f84e53795c 2024-12-09T13:34:31,917 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/rep_barrier/9c065cd7ed3c4d119cde12f84e53795c, entries=22, sequenceid=211, filesize=8.0 K 2024-12-09T13:34:31,918 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/.tmp/table/62f40ba63c5b40a3b6de339632b67a01 as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/table/62f40ba63c5b40a3b6de339632b67a01 2024-12-09T13:34:31,925 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/table/62f40ba63c5b40a3b6de339632b67a01, entries=39, sequenceid=211, filesize=8.8 K 2024-12-09T13:34:31,926 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(3140): Finished flush of dataSize ~76.94 KB/78789, heapSize ~121.74 KB/124664, currentSize=0 B/0 for 1588230740 in 441ms, sequenceid=211, compaction requested=false 2024-12-09T13:34:31,955 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/recovered.edits/214.seqid, newMaxSeqId=214, maxSeqId=1 2024-12-09T13:34:31,956 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:34:31,956 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T13:34:31,956 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 
{event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T13:34:31,956 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733751271485Running coprocessor pre-close hooks at 1733751271485Disabling compacts and flushes for region at 1733751271485Disabling writes for close at 1733751271485Obtaining lock to block concurrent updates at 1733751271486 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733751271486Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=78789, getHeapSize=124664, getOffHeapSize=0, getCellsCount=595 at 1733751271486Flushing stores of hbase:meta,,1.1588230740 at 1733751271487 (+1 ms)Flushing 1588230740/info: creating writer at 1733751271487Flushing 1588230740/info: appending metadata at 1733751271524 (+37 ms)Flushing 1588230740/info: closing flushed file at 1733751271524Flushing 1588230740/ns: creating writer at 1733751271605 (+81 ms)Flushing 1588230740/ns: appending metadata at 1733751271627 (+22 ms)Flushing 1588230740/ns: closing flushed file at 1733751271627Flushing 1588230740/rep_barrier: creating writer at 1733751271664 (+37 ms)Flushing 1588230740/rep_barrier: appending metadata at 1733751271699 (+35 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1733751271699Flushing 1588230740/table: creating writer at 1733751271777 (+78 ms)Flushing 1588230740/table: appending metadata at 1733751271833 (+56 ms)Flushing 1588230740/table: closing flushed file at 1733751271833Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6bbdbd54: reopening flushed file at 1733751271888 (+55 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@76bc0d2: reopening flushed file at 1733751271896 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@e58bd50: reopening flushed file at 1733751271903 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@265c890d: reopening flushed file at 1733751271918 (+15 ms)Finished flush of dataSize ~76.94 KB/78789, heapSize ~121.74 KB/124664, currentSize=0 B/0 for 1588230740 in 441ms, sequenceid=211, compaction requested=false at 1733751271926 (+8 ms)Writing region close event to WAL at 1733751271943 (+17 ms)Running coprocessor post-close hooks at 1733751271956 (+13 ms)Closed at 1733751271956 2024-12-09T13:34:31,957 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegionServer(3302): Adding 1588230740 move to c48e4b8eb196,36493,1733750968163 record at close sequenceid=211 2024-12-09T13:34:31,961 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META, pid=225}] handler.UnassignRegionHandler(157): Closed 1588230740 2024-12-09T13:34:31,961 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=1588230740, regionState=CLOSED 2024-12-09T13:34:31,963 WARN [PEWorker-3 {}] zookeeper.MetaTableLocator(168): Tried to set null ServerName in hbase:meta; skipping -- ServerName required 2024-12-09T13:34:31,963 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=225, ppid=224, state=RUNNABLE, hasLock=true; CloseRegionProcedure 1588230740, server=c48e4b8eb196,39211,1733750968235 2024-12-09T13:34:31,966 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=225, resume processing ppid=224 2024-12-09T13:34:31,966 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=225, ppid=224, state=SUCCESS, hasLock=false; CloseRegionProcedure 1588230740, server=c48e4b8eb196,39211,1733750968235 in 654 msec 2024-12-09T13:34:31,967 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE; state=CLOSED, location=c48e4b8eb196,36493,1733750968163; forceNewPlan=false, retain=false 2024-12-09T13:34:32,117 INFO [c48e4b8eb196:45049 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-09T13:34:32,118 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c48e4b8eb196,36493,1733750968163 2024-12-09T13:34:32,119 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c48e4b8eb196,36493,1733750968163, state=OPENING 2024-12-09T13:34:32,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T13:34:32,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T13:34:32,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T13:34:32,165 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T13:34:32,165 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T13:34:32,165 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T13:34:32,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T13:34:32,166 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE 2024-12-09T13:34:32,166 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T13:34:32,166 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=226, ppid=224, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c48e4b8eb196,36493,1733750968163}] 2024-12-09T13:34:32,323 INFO [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T13:34:32,324 INFO [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T13:34:32,324 INFO [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-09T13:34:32,325 INFO [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c48e4b8eb196%2C36493%2C1733750968163.meta, suffix=.meta, logDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/WALs/c48e4b8eb196,36493,1733750968163, archiveDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/oldWALs, maxLogs=32 2024-12-09T13:34:32,339 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/WALs/c48e4b8eb196,36493,1733750968163/c48e4b8eb196%2C36493%2C1733750968163.meta.1733751272326.meta, exclude list is [], retry=0 2024-12-09T13:34:32,342 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42277,DS-02dae94e-eebd-48dc-9af8-3635b1fd49f9,DISK] 2024-12-09T13:34:32,342 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39425,DS-f429e652-0d3e-448e-8297-4199c22c018a,DISK] 2024-12-09T13:34:32,342 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33239,DS-1c81e62a-c5b9-4b3f-97d4-2b43babdecac,DISK] 2024-12-09T13:34:32,347 INFO [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/WALs/c48e4b8eb196,36493,1733750968163/c48e4b8eb196%2C36493%2C1733750968163.meta.1733751272326.meta 2024-12-09T13:34:32,349 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34425:34425),(127.0.0.1/127.0.0.1:42881:42881),(127.0.0.1/127.0.0.1:42461:42461)] 2024-12-09T13:34:32,349 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T13:34:32,349 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-09T13:34:32,350 INFO [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] coprocessor.CoprocessorHost(174): System coprocessor 
org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-09T13:34:32,350 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T13:34:32,350 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T13:34:32,350 INFO [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T13:34:32,350 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T13:34:32,350 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:34:32,350 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T13:34:32,350 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T13:34:32,352 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T13:34:32,352 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T13:34:32,352 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:34:32,361 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/info/05be33031c1e4fdc8eed7c28d183b049 2024-12-09T13:34:32,361 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T13:34:32,361 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T13:34:32,362 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T13:34:32,362 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:34:32,370 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/ns/50e7adc7ab7c4393bf2b5605c085f192 2024-12-09T13:34:32,370 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T13:34:32,370 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T13:34:32,371 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T13:34:32,371 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:34:32,382 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/rep_barrier/9c065cd7ed3c4d119cde12f84e53795c 2024-12-09T13:34:32,382 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T13:34:32,382 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T13:34:32,383 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T13:34:32,384 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:34:32,388 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/table/62f40ba63c5b40a3b6de339632b67a01 2024-12-09T13:34:32,388 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T13:34:32,389 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T13:34:32,389 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740 2024-12-09T13:34:32,390 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740 2024-12-09T13:34:32,391 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T13:34:32,391 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T13:34:32,392 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-09T13:34:32,393 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T13:34:32,393 INFO [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=215; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69656251, jitterRate=0.037959024310112}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T13:34:32,393 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T13:34:32,394 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733751272350Writing region info on filesystem at 1733751272351 (+1 ms)Initializing all the Stores at 1733751272351Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733751272351Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733751272352 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751272352Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733751272352Cleaning up temporary data from old regions at 1733751272391 (+39 ms)Running coprocessor post-open hooks at 1733751272393 (+2 ms)Region opened successfully at 1733751272394 (+1 ms) 2024-12-09T13:34:32,395 INFO [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=226, masterSystemTime=1733751272321 2024-12-09T13:34:32,396 DEBUG [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T13:34:32,396 INFO [RS_OPEN_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_META, pid=226}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T13:34:32,396 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=1588230740, 
regionState=OPEN, openSeqNum=215, regionLocation=c48e4b8eb196,36493,1733750968163 2024-12-09T13:34:32,397 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c48e4b8eb196,36493,1733750968163, state=OPEN 2024-12-09T13:34:32,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T13:34:32,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T13:34:32,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T13:34:32,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T13:34:32,417 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T13:34:32,417 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T13:34:32,417 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T13:34:32,418 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=226, ppid=224, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c48e4b8eb196,36493,1733750968163 2024-12-09T13:34:32,418 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T13:34:32,421 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=226, resume processing ppid=224 2024-12-09T13:34:32,421 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=226, ppid=224, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c48e4b8eb196,36493,1733750968163 in 252 msec 2024-12-09T13:34:32,423 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=224, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE in 1.1710 sec 2024-12-09T13:34:32,457 INFO [master/c48e4b8eb196:0.Chore.1 {}] master.HMaster(2172): balance hri=96d9654456b57311f15f8834c0cf2325, source=c48e4b8eb196,39211,1733750968235, destination=c48e4b8eb196,44683,1733750968041 2024-12-09T13:34:32,458 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] procedure2.ProcedureExecutor(1139): Stored pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=96d9654456b57311f15f8834c0cf2325, REOPEN/MOVE 2024-12-09T13:34:32,458 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, 
region=96d9654456b57311f15f8834c0cf2325, REOPEN/MOVE 2024-12-09T13:34:32,459 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=96d9654456b57311f15f8834c0cf2325, regionState=CLOSING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:34:32,462 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] ipc.CallRunner(138): callId: 388 service: ClientService methodName: Mutate size: 257 connection: 172.17.0.2:45971 deadline: 1733751332460, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=c48e4b8eb196 port=36493 startCode=1733750968163. As of locationSeqNum=211. 2024-12-09T13:34:32,463 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=c48e4b8eb196 port=36493 startCode=1733750968163. As of locationSeqNum=211. 2024-12-09T13:34:32,463 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=c48e4b8eb196 port=36493 startCode=1733750968163. As of locationSeqNum=211. 2024-12-09T13:34:32,464 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(84): Try updating region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1 with the new location region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,36493,1733750968163, seqNum=211 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=c48e4b8eb196 port=36493 startCode=1733750968163. As of locationSeqNum=211. 2024-12-09T13:34:32,499 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45049 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=c48e4b8eb196,39211,1733750968235, table=hbase:acl, region=96d9654456b57311f15f8834c0cf2325. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-09T13:34:32,588 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:34:32,589 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57193, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:34:32,591 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=96d9654456b57311f15f8834c0cf2325, REOPEN/MOVE because future has completed 2024-12-09T13:34:32,591 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:34:32,591 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=228, ppid=227, state=RUNNABLE, hasLock=false; CloseRegionProcedure 96d9654456b57311f15f8834c0cf2325, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:34:32,747 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(122): Close 96d9654456b57311f15f8834c0cf2325 2024-12-09T13:34:32,747 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:34:32,748 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1722): Closing 96d9654456b57311f15f8834c0cf2325, disabling compactions & flushes 2024-12-09T13:34:32,748 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1755): Closing region hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. 2024-12-09T13:34:32,748 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. 2024-12-09T13:34:32,748 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. after waiting 0 ms 2024-12-09T13:34:32,748 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. 
2024-12-09T13:34:32,748 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(2902): Flushing 96d9654456b57311f15f8834c0cf2325 1/1 column families, dataSize=1.47 KB heapSize=3.49 KB 2024-12-09T13:34:32,771 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/acl/96d9654456b57311f15f8834c0cf2325/.tmp/l/f83df68e56fe4e4481260dbbb726e885 is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1733751215601/DeleteFamily/seqid=0 2024-12-09T13:34:32,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742327_1503 (size=5791) 2024-12-09T13:34:32,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742327_1503 (size=5791) 2024-12-09T13:34:32,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742327_1503 (size=5791) 2024-12-09T13:34:32,806 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.47 KB at sequenceid=28 (bloomFilter=false), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/acl/96d9654456b57311f15f8834c0cf2325/.tmp/l/f83df68e56fe4e4481260dbbb726e885 2024-12-09T13:34:32,815 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f83df68e56fe4e4481260dbbb726e885 2024-12-09T13:34:32,817 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/acl/96d9654456b57311f15f8834c0cf2325/.tmp/l/f83df68e56fe4e4481260dbbb726e885 as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/acl/96d9654456b57311f15f8834c0cf2325/l/f83df68e56fe4e4481260dbbb726e885 2024-12-09T13:34:32,822 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f83df68e56fe4e4481260dbbb726e885 2024-12-09T13:34:32,822 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/acl/96d9654456b57311f15f8834c0cf2325/l/f83df68e56fe4e4481260dbbb726e885, entries=13, sequenceid=28, filesize=5.7 K 2024-12-09T13:34:32,824 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(3140): Finished flush of dataSize ~1.47 KB/1504, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 96d9654456b57311f15f8834c0cf2325 in 76ms, sequenceid=28, compaction requested=false 2024-12-09T13:34:32,848 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/acl/96d9654456b57311f15f8834c0cf2325/recovered.edits/31.seqid, newMaxSeqId=31, maxSeqId=1 2024-12-09T13:34:32,849 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:34:32,849 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1973): Closed hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. 2024-12-09T13:34:32,849 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1676): Region close journal for 96d9654456b57311f15f8834c0cf2325: Waiting for close lock at 1733751272748Running coprocessor pre-close hooks at 1733751272748Disabling compacts and flushes for region at 1733751272748Disabling writes for close at 1733751272748Obtaining lock to block concurrent updates at 1733751272748Preparing flush snapshotting stores in 96d9654456b57311f15f8834c0cf2325 at 1733751272748Finished memstore snapshotting hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., syncing WAL and waiting on mvcc, flushsize=dataSize=1504, getHeapSize=3560, getOffHeapSize=0, getCellsCount=24 at 1733751272748Flushing stores of hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. at 1733751272749 (+1 ms)Flushing 96d9654456b57311f15f8834c0cf2325/l: creating writer at 1733751272749Flushing 96d9654456b57311f15f8834c0cf2325/l: appending metadata at 1733751272770 (+21 ms)Flushing 96d9654456b57311f15f8834c0cf2325/l: closing flushed file at 1733751272770Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48a378f9: reopening flushed file at 1733751272816 (+46 ms)Finished flush of dataSize ~1.47 KB/1504, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 96d9654456b57311f15f8834c0cf2325 in 76ms, sequenceid=28, compaction requested=false at 1733751272824 (+8 ms)Writing region close event to WAL at 1733751272828 (+4 ms)Running coprocessor post-close hooks at 1733751272849 (+21 ms)Closed at 1733751272849 2024-12-09T13:34:32,849 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegionServer(3302): Adding 96d9654456b57311f15f8834c0cf2325 move to c48e4b8eb196,44683,1733750968041 record at close sequenceid=28 2024-12-09T13:34:32,853 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(157): Closed 96d9654456b57311f15f8834c0cf2325 2024-12-09T13:34:32,853 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=96d9654456b57311f15f8834c0cf2325, regionState=CLOSED 2024-12-09T13:34:32,856 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=228, ppid=227, state=RUNNABLE, hasLock=false; CloseRegionProcedure 96d9654456b57311f15f8834c0cf2325, server=c48e4b8eb196,39211,1733750968235 because future has completed 2024-12-09T13:34:32,864 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=228, resume processing ppid=227 2024-12-09T13:34:32,864 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=228, ppid=227, state=SUCCESS, hasLock=false; CloseRegionProcedure 96d9654456b57311f15f8834c0cf2325, server=c48e4b8eb196,39211,1733750968235 in 266 msec 2024-12-09T13:34:32,865 
INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=96d9654456b57311f15f8834c0cf2325, REOPEN/MOVE; state=CLOSED, location=c48e4b8eb196,44683,1733750968041; forceNewPlan=false, retain=false 2024-12-09T13:34:33,016 INFO [c48e4b8eb196:45049 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-09T13:34:33,016 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=96d9654456b57311f15f8834c0cf2325, regionState=OPENING, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:34:33,021 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=96d9654456b57311f15f8834c0cf2325, REOPEN/MOVE because future has completed 2024-12-09T13:34:33,021 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=229, ppid=227, state=RUNNABLE, hasLock=false; OpenRegionProcedure 96d9654456b57311f15f8834c0cf2325, server=c48e4b8eb196,44683,1733750968041}] 2024-12-09T13:34:33,179 INFO [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] handler.AssignRegionHandler(132): Open hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. 2024-12-09T13:34:33,179 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(7752): Opening region: {ENCODED => 96d9654456b57311f15f8834c0cf2325, NAME => 'hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325.', STARTKEY => '', ENDKEY => ''} 2024-12-09T13:34:33,180 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. service=AccessControlService 2024-12-09T13:34:33,180 INFO [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T13:34:33,180 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 96d9654456b57311f15f8834c0cf2325 2024-12-09T13:34:33,180 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(898): Instantiated hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:34:33,180 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(7794): checking encryption for 96d9654456b57311f15f8834c0cf2325 2024-12-09T13:34:33,181 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(7797): checking classloading for 96d9654456b57311f15f8834c0cf2325 2024-12-09T13:34:33,191 INFO [StoreOpener-96d9654456b57311f15f8834c0cf2325-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 96d9654456b57311f15f8834c0cf2325 2024-12-09T13:34:33,192 INFO [StoreOpener-96d9654456b57311f15f8834c0cf2325-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 96d9654456b57311f15f8834c0cf2325 columnFamilyName l 2024-12-09T13:34:33,193 DEBUG [StoreOpener-96d9654456b57311f15f8834c0cf2325-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:34:33,211 INFO [StoreFileOpener-96d9654456b57311f15f8834c0cf2325-l-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f83df68e56fe4e4481260dbbb726e885 2024-12-09T13:34:33,211 DEBUG [StoreOpener-96d9654456b57311f15f8834c0cf2325-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/acl/96d9654456b57311f15f8834c0cf2325/l/f83df68e56fe4e4481260dbbb726e885 2024-12-09T13:34:33,211 INFO [StoreOpener-96d9654456b57311f15f8834c0cf2325-1 {}] regionserver.HStore(327): Store=96d9654456b57311f15f8834c0cf2325/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:34:33,212 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(1038): replaying wal for 96d9654456b57311f15f8834c0cf2325 2024-12-09T13:34:33,212 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 
{event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/acl/96d9654456b57311f15f8834c0cf2325 2024-12-09T13:34:33,214 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/acl/96d9654456b57311f15f8834c0cf2325 2024-12-09T13:34:33,214 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(1048): stopping wal replay for 96d9654456b57311f15f8834c0cf2325 2024-12-09T13:34:33,214 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(1060): Cleaning up temporary data for 96d9654456b57311f15f8834c0cf2325 2024-12-09T13:34:33,216 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(1093): writing seq id for 96d9654456b57311f15f8834c0cf2325 2024-12-09T13:34:33,218 INFO [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(1114): Opened 96d9654456b57311f15f8834c0cf2325; next sequenceid=32; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62282739, jitterRate=-0.07191486656665802}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:34:33,218 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 96d9654456b57311f15f8834c0cf2325 2024-12-09T13:34:33,222 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-09T13:34:33,223 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-09T13:34:33,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-12-09T13:34:33,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-12-09T13:34:33,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-12-09T13:34:33,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-12-09T13:34:33,260 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:34:33,260 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:34:33,267 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:34:33,267 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:34:33,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-09T13:34:33,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-09T13:34:33,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-09T13:34:33,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-09T13:34:33,276 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegion(1006): Region open journal for 96d9654456b57311f15f8834c0cf2325: Running coprocessor pre-open hook at 1733751273181Writing region info on filesystem at 1733751273181Initializing all the Stores at 1733751273186 (+5 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733751273187 (+1 ms)Cleaning up temporary data from old regions at 1733751273214 (+27 ms)Running coprocessor post-open hooks at 1733751273218 (+4 ms)Region opened successfully at 1733751273276 (+58 ms) 2024-12-09T13:34:33,277 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-09T13:34:33,277 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): 
Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-09T13:34:33,277 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-09T13:34:33,277 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-09T13:34:33,277 INFO [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., pid=229, masterSystemTime=1733751273176 2024-12-09T13:34:33,282 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. 2024-12-09T13:34:33,282 INFO [RS_OPEN_PRIORITY_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=229}] handler.AssignRegionHandler(153): Opened hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. 2024-12-09T13:34:33,283 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=96d9654456b57311f15f8834c0cf2325, regionState=OPEN, openSeqNum=32, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:34:33,286 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=229, ppid=227, state=RUNNABLE, hasLock=false; OpenRegionProcedure 96d9654456b57311f15f8834c0cf2325, server=c48e4b8eb196,44683,1733750968041 because future has completed 2024-12-09T13:34:33,292 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=229, resume processing ppid=227 2024-12-09T13:34:33,292 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=229, ppid=227, state=SUCCESS, hasLock=false; OpenRegionProcedure 96d9654456b57311f15f8834c0cf2325, server=c48e4b8eb196,44683,1733750968041 in 266 msec 2024-12-09T13:34:33,297 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=227, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=96d9654456b57311f15f8834c0cf2325, REOPEN/MOVE in 835 msec 2024-12-09T13:34:33,388 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] master.HMaster(2203): Balancer is going into sleep until next period in 300000ms 2024-12-09T13:34:33,407 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T13:34:33,421 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-09T13:34:33,421 DEBUG [master/c48e4b8eb196:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 96d9654456b57311f15f8834c0cf2325 changed from -1.0 to 0.0, refreshing cache 2024-12-09T13:34:33,441 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testExportExpiredSnapshot because normalization is disabled in its table 
properties and normalization is also disabled at table level by default 2024-12-09T13:34:33,441 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testtb-testExportWithChecksum because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-09T13:34:34,326 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_2/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000006/launch_container.sh] 2024-12-09T13:34:34,326 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_2/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000006/container_tokens] 2024-12-09T13:34:34,326 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_2/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000006/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552/cf/cb929f6039494a29b4bfaa09df104f6b and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751242224/archive/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552/cf/cb929f6039494a29b4bfaa09df104f6b. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-09T13:34:35,179 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_0/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000007/launch_container.sh] 2024-12-09T13:34:35,180 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_0/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000007/container_tokens] 2024-12-09T13:34:35,180 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_0/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000007/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d/cf/cd3fee1f23dc46adb9da4f03cedde158 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/local-export-1733751242224/archive/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d/cf/cd3fee1f23dc46adb9da4f03cedde158. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
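The two checksum-mismatch failures above come from ExportSnapshot's per-file verification: the source (hdfs://) and destination (file:/) filesystems use different checksum algorithms, so the comparison cannot succeed as-is. The error text itself names the remedies: request file-level CRCs with -Ddfs.checksum.combine.mode=COMPOSITE_CRC, or skip verification with -no-checksum-verify. Below is a minimal, hedged sketch of re-driving such an export with the first option; the snapshot name matches the one exported in this test run, while the class name and destination path are placeholders rather than values taken from this log.

    // Hedged sketch only: re-run the snapshot export with file-level CRC
    // validation enabled, as the checksum-mismatch message above suggests.
    // Class name and destination path are placeholders, not from this run.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ReExportWithCompositeCrc {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // COMPOSITE_CRC makes HDFS and local-filesystem checksums comparable
        // at the file level even when block sizes or filesystems differ.
        conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum", // snapshot exported in this test
            "-copy-to", "file:///tmp/local-export"         // placeholder destination
            // Alternative: add "-no-checksum-verify" to skip this check entirely.
        });
        System.exit(rc);
      }
    }

Passing "-no-checksum-verify" instead would let the copy complete without this verification, at the cost the error message already notes: corruption introduced during transfer could go undetected.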
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-09T13:34:35,675 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0009_000001 (auth:SIMPLE) from 127.0.0.1:37598 2024-12-09T13:34:36,101 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T13:34:36,680 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0009_000001 (auth:SIMPLE) from 127.0.0.1:37608 2024-12-09T13:34:38,310 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733750974572_0009_01_000010 while processing FINISH_CONTAINERS event 2024-12-09T13:34:38,390 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0009_000001 (auth:SIMPLE) from 127.0.0.1:37614 2024-12-09T13:34:38,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742321_1497 (size=30401) 2024-12-09T13:34:38,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742321_1497 (size=30401) 2024-12-09T13:34:38,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742321_1497 (size=30401) 2024-12-09T13:34:38,416 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733750974572_0009_01_000009 is : 143 2024-12-09T13:34:38,427 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_1/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000009/launch_container.sh] 2024-12-09T13:34:38,427 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_1/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000009/container_tokens] 2024-12-09T13:34:38,427 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for 
path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_1/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000009/sysfs] 2024-12-09T13:34:38,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742328_1504 (size=460) 2024-12-09T13:34:38,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742328_1504 (size=460) 2024-12-09T13:34:38,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742328_1504 (size=460) 2024-12-09T13:34:38,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742329_1505 (size=30401) 2024-12-09T13:34:38,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742329_1505 (size=30401) 2024-12-09T13:34:38,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742329_1505 (size=30401) 2024-12-09T13:34:38,464 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_0/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000008/launch_container.sh] 2024-12-09T13:34:38,464 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_0/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000008/container_tokens] 2024-12-09T13:34:38,464 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_0/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000008/sysfs] 2024-12-09T13:34:38,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742330_1506 (size=349848) 2024-12-09T13:34:38,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742330_1506 (size=349848) 2024-12-09T13:34:38,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742330_1506 (size=349848) 2024-12-09T13:34:38,485 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0009_000001 (auth:SIMPLE) from 127.0.0.1:37620 2024-12-09T13:34:40,496 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1239): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1733750974572_0009_m_000001 Job failed as 
tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:947) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1216) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:400) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:285) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T13:34:40,497 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751280497 2024-12-09T13:34:40,497 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:45819, tgtDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751280497, rawTgtDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751280497, srcFsUri=hdfs://localhost:45819, srcDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:34:40,537 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:45819, inputRoot=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:34:40,537 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751280497, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751280497/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-09T13:34:40,539 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-09T13:34:40,544 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751280497/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-09T13:34:40,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742331_1507 (size=621) 2024-12-09T13:34:40,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742332_1508 (size=156) 2024-12-09T13:34:40,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742331_1507 (size=621) 2024-12-09T13:34:40,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742332_1508 (size=156) 2024-12-09T13:34:40,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742331_1507 (size=621) 2024-12-09T13:34:40,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742332_1508 (size=156) 2024-12-09T13:34:40,576 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:34:40,576 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:34:40,576 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:34:41,462 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop-14374841101893547220.jar 2024-12-09T13:34:41,462 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:34:41,462 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:34:41,517 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop-11596427194602468873.jar 2024-12-09T13:34:41,517 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:34:41,517 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:34:41,517 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:34:41,517 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:34:41,518 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:34:41,518 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:34:41,518 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-09T13:34:41,518 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-09T13:34:41,518 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-09T13:34:41,518 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-09T13:34:41,519 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-09T13:34:41,519 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-09T13:34:41,519 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-09T13:34:41,519 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-09T13:34:41,519 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-09T13:34:41,519 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-09T13:34:41,519 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-09T13:34:41,520 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:34:41,520 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:34:41,520 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:34:41,521 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:34:41,521 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:34:41,521 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:34:41,522 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:34:41,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742333_1509 (size=5175431) 2024-12-09T13:34:41,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742333_1509 (size=5175431) 2024-12-09T13:34:41,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742333_1509 (size=5175431) 2024-12-09T13:34:41,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742334_1510 (size=1877034) 2024-12-09T13:34:41,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742334_1510 (size=1877034) 2024-12-09T13:34:41,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742334_1510 (size=1877034) 2024-12-09T13:34:41,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742335_1511 (size=20406) 2024-12-09T13:34:41,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742335_1511 (size=20406) 2024-12-09T13:34:41,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39425 is added to blk_1073742335_1511 (size=20406) 2024-12-09T13:34:41,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742336_1512 (size=24096) 2024-12-09T13:34:41,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742336_1512 (size=24096) 2024-12-09T13:34:41,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742336_1512 (size=24096) 2024-12-09T13:34:41,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742337_1513 (size=503880) 2024-12-09T13:34:41,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742337_1513 (size=503880) 2024-12-09T13:34:41,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742337_1513 (size=503880) 2024-12-09T13:34:41,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742338_1514 (size=4695811) 2024-12-09T13:34:41,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742338_1514 (size=4695811) 2024-12-09T13:34:41,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742338_1514 (size=4695811) 2024-12-09T13:34:41,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742339_1515 (size=111872) 2024-12-09T13:34:41,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742339_1515 (size=111872) 2024-12-09T13:34:41,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742339_1515 (size=111872) 2024-12-09T13:34:41,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742340_1516 (size=1597213) 2024-12-09T13:34:41,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742340_1516 (size=1597213) 2024-12-09T13:34:41,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742340_1516 (size=1597213) 2024-12-09T13:34:41,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742341_1517 (size=4188619) 2024-12-09T13:34:41,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742341_1517 (size=4188619) 2024-12-09T13:34:41,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742341_1517 (size=4188619) 2024-12-09T13:34:41,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742342_1518 (size=127628) 2024-12-09T13:34:41,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33239 is added to blk_1073742342_1518 (size=127628) 2024-12-09T13:34:41,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742342_1518 (size=127628) 2024-12-09T13:34:42,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742343_1519 (size=29229) 2024-12-09T13:34:42,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742343_1519 (size=29229) 2024-12-09T13:34:42,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742343_1519 (size=29229) 2024-12-09T13:34:42,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742344_1520 (size=45609) 2024-12-09T13:34:42,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742344_1520 (size=45609) 2024-12-09T13:34:42,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742344_1520 (size=45609) 2024-12-09T13:34:42,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742345_1521 (size=8360360) 2024-12-09T13:34:42,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742345_1521 (size=8360360) 2024-12-09T13:34:42,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742345_1521 (size=8360360) 2024-12-09T13:34:42,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742346_1522 (size=1323991) 2024-12-09T13:34:42,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742346_1522 (size=1323991) 2024-12-09T13:34:42,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742346_1522 (size=1323991) 2024-12-09T13:34:42,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742347_1523 (size=232957) 2024-12-09T13:34:42,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742347_1523 (size=232957) 2024-12-09T13:34:42,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742347_1523 (size=232957) 2024-12-09T13:34:42,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742348_1524 (size=217634) 2024-12-09T13:34:42,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742348_1524 (size=217634) 2024-12-09T13:34:42,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742348_1524 (size=217634) 2024-12-09T13:34:42,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742349_1525 (size=903930) 2024-12-09T13:34:42,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742349_1525 (size=903930) 2024-12-09T13:34:42,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742349_1525 (size=903930) 2024-12-09T13:34:42,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742350_1526 (size=77835) 2024-12-09T13:34:42,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742350_1526 (size=77835) 2024-12-09T13:34:42,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742350_1526 (size=77835) 2024-12-09T13:34:42,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742351_1527 (size=1832290) 2024-12-09T13:34:42,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742351_1527 (size=1832290) 2024-12-09T13:34:42,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742351_1527 (size=1832290) 2024-12-09T13:34:42,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742352_1528 (size=6425017) 2024-12-09T13:34:42,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742352_1528 (size=6425017) 2024-12-09T13:34:42,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742352_1528 (size=6425017) 2024-12-09T13:34:42,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742353_1529 (size=30949) 2024-12-09T13:34:42,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742353_1529 (size=30949) 2024-12-09T13:34:42,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742353_1529 (size=30949) 2024-12-09T13:34:42,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742354_1530 (size=136454) 2024-12-09T13:34:42,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742354_1530 (size=136454) 2024-12-09T13:34:42,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742354_1530 (size=136454) 2024-12-09T13:34:42,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742355_1531 (size=443172) 2024-12-09T13:34:42,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742355_1531 (size=443172) 2024-12-09T13:34:42,271 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742355_1531 (size=443172) 2024-12-09T13:34:42,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742356_1532 (size=322274) 2024-12-09T13:34:42,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742356_1532 (size=322274) 2024-12-09T13:34:42,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742356_1532 (size=322274) 2024-12-09T13:34:42,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742357_1533 (size=131440) 2024-12-09T13:34:42,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742357_1533 (size=131440) 2024-12-09T13:34:42,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742357_1533 (size=131440) 2024-12-09T13:34:42,304 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-09T13:34:42,306 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-09T13:34:42,308 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-09T13:34:42,308 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-09T13:34:42,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742358_1534 (size=441) 2024-12-09T13:34:42,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742358_1534 (size=441) 2024-12-09T13:34:42,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742358_1534 (size=441) 2024-12-09T13:34:42,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742359_1535 (size=21) 2024-12-09T13:34:42,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742359_1535 (size=21) 2024-12-09T13:34:42,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742359_1535 (size=21) 2024-12-09T13:34:42,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742360_1536 (size=304102) 2024-12-09T13:34:42,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742360_1536 (size=304102) 2024-12-09T13:34:42,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742360_1536 (size=304102) 2024-12-09T13:34:44,536 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-09T13:34:44,537 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T13:34:44,538 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0009_000001 (auth:SIMPLE) from 127.0.0.1:52746 2024-12-09T13:34:44,548 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_3/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000001/launch_container.sh] 2024-12-09T13:34:44,548 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_3/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000001/container_tokens] 2024-12-09T13:34:44,548 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_3/usercache/jenkins/appcache/application_1733750974572_0009/container_1733750974572_0009_01_000001/sysfs] 2024-12-09T13:34:45,312 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733750974572_0009_01_000011 while processing FINISH_CONTAINERS event 2024-12-09T13:34:45,316 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0010_000001 (auth:SIMPLE) from 127.0.0.1:55108 2024-12-09T13:34:46,216 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 715af21e3a0a4ff556e6e5f0007c4552, had cached 0 bytes from a total of 5286 2024-12-09T13:34:46,216 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 9178341f170b995441dfe37d963f910d, had cached 0 bytes from a total of 8326 2024-12-09T13:34:54,094 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0010_000001 (auth:SIMPLE) from 127.0.0.1:43680 2024-12-09T13:34:54,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742361_1537 (size=349800) 2024-12-09T13:34:54,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742361_1537 (size=349800) 2024-12-09T13:34:54,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742361_1537 (size=349800) 2024-12-09T13:34:56,022 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 
for details. 2024-12-09T13:34:56,271 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0010_000001 (auth:SIMPLE) from 127.0.0.1:46068 2024-12-09T13:34:56,271 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0010_000001 (auth:SIMPLE) from 127.0.0.1:56846 2024-12-09T13:34:59,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742362_1538 (size=8326) 2024-12-09T13:34:59,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742362_1538 (size=8326) 2024-12-09T13:34:59,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742362_1538 (size=8326) 2024-12-09T13:34:59,448 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_2/usercache/jenkins/appcache/application_1733750974572_0010/container_1733750974572_0010_01_000002/launch_container.sh] 2024-12-09T13:34:59,448 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_2/usercache/jenkins/appcache/application_1733750974572_0010/container_1733750974572_0010_01_000002/container_tokens] 2024-12-09T13:34:59,448 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_2/usercache/jenkins/appcache/application_1733750974572_0010/container_1733750974572_0010_01_000002/sysfs] 2024-12-09T13:34:59,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742364_1540 (size=5286) 2024-12-09T13:34:59,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742364_1540 (size=5286) 2024-12-09T13:34:59,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742364_1540 (size=5286) 2024-12-09T13:34:59,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742363_1539 (size=22150) 2024-12-09T13:34:59,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742363_1539 (size=22150) 2024-12-09T13:34:59,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742363_1539 (size=22150) 2024-12-09T13:34:59,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742365_1541 (size=462) 2024-12-09T13:34:59,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to 
blk_1073742365_1541 (size=462) 2024-12-09T13:34:59,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742365_1541 (size=462) 2024-12-09T13:34:59,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742366_1542 (size=22150) 2024-12-09T13:34:59,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742366_1542 (size=22150) 2024-12-09T13:34:59,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742366_1542 (size=22150) 2024-12-09T13:34:59,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742367_1543 (size=349800) 2024-12-09T13:34:59,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742367_1543 (size=349800) 2024-12-09T13:34:59,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742367_1543 (size=349800) 2024-12-09T13:35:00,001 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0010_000001 (auth:SIMPLE) from 127.0.0.1:46082 2024-12-09T13:35:00,009 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0010_000001 (auth:SIMPLE) from 127.0.0.1:56848 2024-12-09T13:35:00,025 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733750974572_0010_01_000003 is : 143 2024-12-09T13:35:00,027 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_3/usercache/jenkins/appcache/application_1733750974572_0010/container_1733750974572_0010_01_000003/sysfs] 2024-12-09T13:35:01,537 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-09T13:35:01,538 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
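The entries above trace the copy phase of the snapshot export: TableMapReduceUtil resolves the dependency jars for the MapReduce job, ExportSnapshot loads the 'snaptb0-testExportWithChecksum' hfile list and plans two export splits (8.1 K and 5.2 K), and the MiniMRCluster containers run the copy until 'Finalize the Snapshot Export'. For orientation only, a minimal sketch of driving the same org.apache.hadoop.hbase.snapshot.ExportSnapshot tool directly, outside this test harness, might look like the following; the destination URI and mapper count are illustrative assumptions, not values taken from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Snapshot name matches the export above; the copy target is hypothetical.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum",
            "-copy-to", "hdfs://backup-namenode:8020/hbase-backups",
            "-mappers", "2"
        });
        System.exit(rc);
      }
    }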
2024-12-09T13:35:01,543 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportWithChecksum 2024-12-09T13:35:01,543 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-09T13:35:01,544 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-09T13:35:01,544 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-09T13:35:01,544 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-09T13:35:01,544 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-09T13:35:01,544 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751280497/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751280497/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-09T13:35:01,544 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751280497/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-09T13:35:01,544 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751280497/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-09T13:35:01,550 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-12-09T13:35:01,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=230, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-12-09T13:35:01,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-12-09T13:35:01,553 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751301553"}]},"ts":"1733751301553"} 2024-12-09T13:35:01,555 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-12-09T13:35:01,555 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-12-09T13:35:01,556 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=231, ppid=230, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-12-09T13:35:01,557 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=715af21e3a0a4ff556e6e5f0007c4552, UNASSIGN}, {pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=9178341f170b995441dfe37d963f910d, UNASSIGN}] 2024-12-09T13:35:01,558 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=715af21e3a0a4ff556e6e5f0007c4552, UNASSIGN 2024-12-09T13:35:01,558 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=9178341f170b995441dfe37d963f910d, UNASSIGN 2024-12-09T13:35:01,559 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=232 updating hbase:meta row=715af21e3a0a4ff556e6e5f0007c4552, regionState=CLOSING, regionLocation=c48e4b8eb196,44683,1733750968041 2024-12-09T13:35:01,559 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=233 updating hbase:meta row=9178341f170b995441dfe37d963f910d, regionState=CLOSING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:35:01,560 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=9178341f170b995441dfe37d963f910d, UNASSIGN because future has completed 2024-12-09T13:35:01,561 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:35:01,561 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=234, ppid=233, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9178341f170b995441dfe37d963f910d, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:35:01,561 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=715af21e3a0a4ff556e6e5f0007c4552, UNASSIGN because future has completed 2024-12-09T13:35:01,562 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:35:01,562 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=235, ppid=232, state=RUNNABLE, hasLock=false; CloseRegionProcedure 715af21e3a0a4ff556e6e5f0007c4552, server=c48e4b8eb196,44683,1733750968041}] 2024-12-09T13:35:01,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-12-09T13:35:01,714 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 
{event_type=M_RS_CLOSE_REGION, pid=234}] handler.UnassignRegionHandler(122): Close 9178341f170b995441dfe37d963f910d 2024-12-09T13:35:01,714 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:35:01,714 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1722): Closing 9178341f170b995441dfe37d963f910d, disabling compactions & flushes 2024-12-09T13:35:01,714 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d. 2024-12-09T13:35:01,714 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d. 2024-12-09T13:35:01,714 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d. after waiting 0 ms 2024-12-09T13:35:01,714 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d. 2024-12-09T13:35:01,715 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] handler.UnassignRegionHandler(122): Close 715af21e3a0a4ff556e6e5f0007c4552 2024-12-09T13:35:01,715 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:35:01,715 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1722): Closing 715af21e3a0a4ff556e6e5f0007c4552, disabling compactions & flushes 2024-12-09T13:35:01,715 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552. 2024-12-09T13:35:01,715 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552. 2024-12-09T13:35:01,715 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552. after waiting 0 ms 2024-12-09T13:35:01,715 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552. 
2024-12-09T13:35:01,718 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T13:35:01,718 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T13:35:01,718 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:35:01,718 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:35:01,718 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d. 2024-12-09T13:35:01,718 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552. 2024-12-09T13:35:01,719 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1676): Region close journal for 9178341f170b995441dfe37d963f910d: Waiting for close lock at 1733751301714Running coprocessor pre-close hooks at 1733751301714Disabling compacts and flushes for region at 1733751301714Disabling writes for close at 1733751301714Writing region close event to WAL at 1733751301715 (+1 ms)Running coprocessor post-close hooks at 1733751301718 (+3 ms)Closed at 1733751301718 2024-12-09T13:35:01,719 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1676): Region close journal for 715af21e3a0a4ff556e6e5f0007c4552: Waiting for close lock at 1733751301715Running coprocessor pre-close hooks at 1733751301715Disabling compacts and flushes for region at 1733751301715Disabling writes for close at 1733751301715Writing region close event to WAL at 1733751301716 (+1 ms)Running coprocessor post-close hooks at 1733751301718 (+2 ms)Closed at 1733751301718 2024-12-09T13:35:01,720 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] handler.UnassignRegionHandler(157): Closed 9178341f170b995441dfe37d963f910d 2024-12-09T13:35:01,721 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=233 updating hbase:meta row=9178341f170b995441dfe37d963f910d, regionState=CLOSED 2024-12-09T13:35:01,721 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] handler.UnassignRegionHandler(157): Closed 715af21e3a0a4ff556e6e5f0007c4552 2024-12-09T13:35:01,721 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=232 updating hbase:meta row=715af21e3a0a4ff556e6e5f0007c4552, regionState=CLOSED 2024-12-09T13:35:01,723 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): 
Going to wake up procedure pid=234, ppid=233, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9178341f170b995441dfe37d963f910d, server=c48e4b8eb196,39211,1733750968235 because future has completed 2024-12-09T13:35:01,723 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=235, ppid=232, state=RUNNABLE, hasLock=false; CloseRegionProcedure 715af21e3a0a4ff556e6e5f0007c4552, server=c48e4b8eb196,44683,1733750968041 because future has completed 2024-12-09T13:35:01,728 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=234, resume processing ppid=233 2024-12-09T13:35:01,728 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=234, ppid=233, state=SUCCESS, hasLock=false; CloseRegionProcedure 9178341f170b995441dfe37d963f910d, server=c48e4b8eb196,39211,1733750968235 in 163 msec 2024-12-09T13:35:01,728 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=235, resume processing ppid=232 2024-12-09T13:35:01,728 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=235, ppid=232, state=SUCCESS, hasLock=false; CloseRegionProcedure 715af21e3a0a4ff556e6e5f0007c4552, server=c48e4b8eb196,44683,1733750968041 in 162 msec 2024-12-09T13:35:01,738 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=233, ppid=231, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=9178341f170b995441dfe37d963f910d, UNASSIGN in 171 msec 2024-12-09T13:35:01,740 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=232, resume processing ppid=231 2024-12-09T13:35:01,740 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=232, ppid=231, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=715af21e3a0a4ff556e6e5f0007c4552, UNASSIGN in 172 msec 2024-12-09T13:35:01,743 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=231, resume processing ppid=230 2024-12-09T13:35:01,743 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=231, ppid=230, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 185 msec 2024-12-09T13:35:01,745 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751301745"}]},"ts":"1733751301745"} 2024-12-09T13:35:01,747 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-12-09T13:35:01,747 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-12-09T13:35:01,749 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=230, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 198 msec 2024-12-09T13:35:01,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-12-09T13:35:01,873 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-09T13:35:01,873 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$5(2570): 
Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-12-09T13:35:01,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=236, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-09T13:35:01,875 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=236, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-09T13:35:01,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-12-09T13:35:01,876 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=236, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-09T13:35:01,876 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39211 {}] ipc.CallRunner(138): callId: 405 service: ClientService methodName: Mutate size: 159 connection: 172.17.0.2:45971 deadline: 1733751361876, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=c48e4b8eb196 port=44683 startCode=1733750968041. As of locationSeqNum=28. 2024-12-09T13:35:01,877 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2 , the old value is region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=c48e4b8eb196 port=44683 startCode=1733750968041. As of locationSeqNum=28. 2024-12-09T13:35:01,877 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=c48e4b8eb196 port=44683 startCode=1733750968041. As of locationSeqNum=28. 2024-12-09T13:35:01,877 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(84): Try updating region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2 with the new location region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,44683,1733750968041, seqNum=28 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=c48e4b8eb196 port=44683 startCode=1733750968041. As of locationSeqNum=28. 
2024-12-09T13:35:01,891 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552 2024-12-09T13:35:01,891 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d 2024-12-09T13:35:01,893 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d/recovered.edits] 2024-12-09T13:35:01,893 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552/recovered.edits] 2024-12-09T13:35:01,901 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552/cf/cb929f6039494a29b4bfaa09df104f6b to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552/cf/cb929f6039494a29b4bfaa09df104f6b 2024-12-09T13:35:01,905 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552/recovered.edits/9.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552/recovered.edits/9.seqid 2024-12-09T13:35:01,906 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/715af21e3a0a4ff556e6e5f0007c4552 2024-12-09T13:35:01,909 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d/cf/cd3fee1f23dc46adb9da4f03cedde158 to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d/cf/cd3fee1f23dc46adb9da4f03cedde158 2024-12-09T13:35:01,912 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d/recovered.edits/9.seqid to 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d/recovered.edits/9.seqid 2024-12-09T13:35:01,912 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportWithChecksum/9178341f170b995441dfe37d963f910d 2024-12-09T13:35:01,913 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-12-09T13:35:01,915 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=236, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-09T13:35:01,922 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-12-09T13:35:01,926 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithChecksum' descriptor. 2024-12-09T13:35:01,928 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=236, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-09T13:35:01,928 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states. 2024-12-09T13:35:01,928 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751301928"}]},"ts":"9223372036854775807"} 2024-12-09T13:35:01,929 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751301928"}]},"ts":"9223372036854775807"} 2024-12-09T13:35:01,932 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-09T13:35:01,932 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 715af21e3a0a4ff556e6e5f0007c4552, NAME => 'testtb-testExportWithChecksum,,1733751240875.715af21e3a0a4ff556e6e5f0007c4552.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 9178341f170b995441dfe37d963f910d, NAME => 'testtb-testExportWithChecksum,1,1733751240875.9178341f170b995441dfe37d963f910d.', STARTKEY => '1', ENDKEY => ''}] 2024-12-09T13:35:01,932 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted. 
2024-12-09T13:35:01,933 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733751301932"}]},"ts":"9223372036854775807"} 2024-12-09T13:35:01,936 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-12-09T13:35:01,937 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=236, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-09T13:35:01,939 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=236, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 64 msec 2024-12-09T13:35:01,993 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:35:01,994 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51849, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:35:02,012 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44683 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:35:02,013 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34751, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-12-09T13:35:02,014 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39211 {}] ipc.CallRunner(138): callId: 3303 service: ClientService methodName: Scan size: 189 connection: 172.17.0.2:34751 deadline: 1733751362012, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=c48e4b8eb196 port=36493 startCode=1733750968163. As of locationSeqNum=211. 2024-12-09T13:35:02,014 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=c48e4b8eb196 port=36493 startCode=1733750968163. As of locationSeqNum=211. 2024-12-09T13:35:02,014 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=c48e4b8eb196 port=36493 startCode=1733750968163. As of locationSeqNum=211. 2024-12-09T13:35:02,014 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncRegionLocatorHelper(84): Try updating region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1 with the new location region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,36493,1733750968163, seqNum=211 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=c48e4b8eb196 port=36493 startCode=1733750968163. As of locationSeqNum=211. 
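The procedure chain recorded above (DisableTableProcedure pid=230 with its CloseTableRegionsProcedure and TransitRegionStateProcedure children, then DeleteTableProcedure pid=236 archiving the region HFiles and deleting the rows from hbase:meta) is the master-side work behind an ordinary disable-then-drop of the table. A minimal client-side sketch of the calls that trigger it, assuming a plain Connection/Admin handle rather than the test's own utility wiring:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testtb-testExportWithChecksum");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // disableTable() drives the DisableTableProcedure and region unassigns seen above.
          if (admin.isTableEnabled(table)) {
            admin.disableTable(table);
          }
          // deleteTable() drives the DeleteTableProcedure, HFile archiving and meta cleanup.
          admin.deleteTable(table);
        }
      }
    }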
2024-12-09T13:35:02,123 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:35:02,124 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48335, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-12-09T13:35:02,129 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,44683,1733750968041, seqNum=32] 2024-12-09T13:35:02,129 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:35:02,130 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47859, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-12-09T13:35:02,133 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44683 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-12-09T13:35:02,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-09T13:35:02,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-09T13:35:02,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-09T13:35:02,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-09T13:35:02,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:35:02,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:35:02,212 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data null 2024-12-09T13:35:02,212 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data null 2024-12-09T13:35:02,212 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-09T13:35:02,212 INFO 
[zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-09T13:35:02,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:35:02,212 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data null 2024-12-09T13:35:02,212 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-09T13:35:02,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-12-09T13:35:02,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:35:02,214 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:35:02,214 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:35:02,214 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:35:02,214 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data null 2024-12-09T13:35:02,214 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-09T13:35:02,214 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-12-09T13:35:02,214 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-09T13:35:02,215 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:35:02,222 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-12-09T13:35:02,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-12-09T13:35:02,226 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 
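The MasterRpcServices 'delete name: ... type: DISABLED' calls above, together with the SnapshotManager 'Deleting snapshot' entries around them, are the snapshot cleanup for this test. Reusing the same kind of Admin handle as in the previous sketch, the equivalent client calls would plausibly be admin.deleteSnapshot("emptySnaptb0-testExportWithChecksum") followed by admin.deleteSnapshot("snaptb0-testExportWithChecksum"); Admin.deleteSnapshot(String) is the public client API behind these delete RPCs.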
2024-12-09T13:35:02,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-12-09T13:35:02,258 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=831 (was 821) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_248799815_22 at /127.0.0.1:39632 [Receiving block BP-67561561-172.17.0.2-1733750962175:blk_1073742326_1502] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:37492 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_725850059_22 at /127.0.0.1:56710 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_735457396_1 at /127.0.0.1:54416 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 157738) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38415 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_735457396_1 at /127.0.0.1:56336 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-67561561-172.17.0.2-1733750962175:blk_1073742326_1502, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7-prefix:c48e4b8eb196,36493,1733750968163.meta java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_248799815_22 at /127.0.0.1:53898 [Receiving block BP-67561561-172.17.0.2-1733750962175:blk_1073742326_1502] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-8203 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-24 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-25 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-67561561-172.17.0.2-1733750962175:blk_1073742326_1502, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (294847031) connection to localhost/127.0.0.1:38415 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: PacketResponder: BP-67561561-172.17.0.2-1733750962175:blk_1073742326_1502, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_248799815_22 at /127.0.0.1:60628 [Receiving block BP-67561561-172.17.0.2-1733750962175:blk_1073742326_1502] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:36692 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=853 (was 825) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=648 (was 528) - SystemLoadAverage LEAK? -, ProcessCount=23 (was 18) - ProcessCount LEAK? 
-, AvailableMemoryMB=898 (was 1279) 2024-12-09T13:35:02,258 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=831 is superior to 500 2024-12-09T13:35:02,275 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=831, OpenFileDescriptor=853, MaxFileDescriptor=1048576, SystemLoadAverage=648, ProcessCount=23, AvailableMemoryMB=898 2024-12-09T13:35:02,275 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=831 is superior to 500 2024-12-09T13:35:02,277 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T13:35:02,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=237, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:02,279 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T13:35:02,279 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 237 2024-12-09T13:35:02,279 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:35:02,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=237 2024-12-09T13:35:02,280 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T13:35:02,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742368_1544 (size=418) 2024-12-09T13:35:02,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742368_1544 (size=418) 2024-12-09T13:35:02,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742368_1544 (size=418) 2024-12-09T13:35:02,297 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 72e9122d33bc2fb2376c9a536c330bd9, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' 
=> 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:35:02,298 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 3c8784dbaa49d85aceb6fab91f2628cd, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:35:02,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742370_1546 (size=79) 2024-12-09T13:35:02,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742370_1546 (size=79) 2024-12-09T13:35:02,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742370_1546 (size=79) 2024-12-09T13:35:02,320 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:35:02,320 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing 72e9122d33bc2fb2376c9a536c330bd9, disabling compactions & flushes 2024-12-09T13:35:02,320 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9. 2024-12-09T13:35:02,320 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9. 2024-12-09T13:35:02,320 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9. after waiting 0 ms 2024-12-09T13:35:02,320 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9. 
2024-12-09T13:35:02,320 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9. 2024-12-09T13:35:02,320 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for 72e9122d33bc2fb2376c9a536c330bd9: Waiting for close lock at 1733751302320Disabling compacts and flushes for region at 1733751302320Disabling writes for close at 1733751302320Writing region close event to WAL at 1733751302320Closed at 1733751302320 2024-12-09T13:35:02,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742369_1545 (size=79) 2024-12-09T13:35:02,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742369_1545 (size=79) 2024-12-09T13:35:02,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742369_1545 (size=79) 2024-12-09T13:35:02,330 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:35:02,330 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing 3c8784dbaa49d85aceb6fab91f2628cd, disabling compactions & flushes 2024-12-09T13:35:02,330 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd. 2024-12-09T13:35:02,330 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd. 2024-12-09T13:35:02,330 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd. after waiting 0 ms 2024-12-09T13:35:02,330 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd. 2024-12-09T13:35:02,330 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd. 
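The create request logged above ('testtb-testExportFileSystemStateWithSkipTmp' with a single 'cf' family, VERSIONS => '1', and two regions split at row '1', which RegionOpenAndInit has just initialized and closed) corresponds to a table descriptor that can be built client-side roughly as follows. A minimal sketch assuming the standard HBase 2.x TableDescriptorBuilder and Admin APIs; the surrounding class and the admin handle are illustrative:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    final class CreateSkipTmpTable {
      // Hypothetical helper: build and create a table equivalent to the descriptor in the log.
      static void create(Admin admin) throws Exception {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)   // VERSIONS => '1'; the other logged attributes are defaults
                .build())
            .build();
        // One split key of '1' yields the two regions initialized above: ['', '1') and ['1', '').
        byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
        admin.createTable(desc, splitKeys);
      }
    }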
2024-12-09T13:35:02,330 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for 3c8784dbaa49d85aceb6fab91f2628cd: Waiting for close lock at 1733751302330Disabling compacts and flushes for region at 1733751302330Disabling writes for close at 1733751302330Writing region close event to WAL at 1733751302330Closed at 1733751302330 2024-12-09T13:35:02,332 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T13:35:02,332 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733751302332"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733751302332"}]},"ts":"1733751302332"} 2024-12-09T13:35:02,332 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733751302332"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733751302332"}]},"ts":"1733751302332"} 2024-12-09T13:35:02,335 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-09T13:35:02,336 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T13:35:02,336 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751302336"}]},"ts":"1733751302336"} 2024-12-09T13:35:02,338 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-12-09T13:35:02,339 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {c48e4b8eb196=0} racks are {/default-rack=0} 2024-12-09T13:35:02,340 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T13:35:02,340 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T13:35:02,340 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T13:35:02,340 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T13:35:02,340 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T13:35:02,340 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T13:35:02,340 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T13:35:02,340 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T13:35:02,340 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T13:35:02,340 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T13:35:02,341 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=238, 
ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=72e9122d33bc2fb2376c9a536c330bd9, ASSIGN}, {pid=239, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3c8784dbaa49d85aceb6fab91f2628cd, ASSIGN}] 2024-12-09T13:35:02,342 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=239, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3c8784dbaa49d85aceb6fab91f2628cd, ASSIGN 2024-12-09T13:35:02,343 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=238, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=72e9122d33bc2fb2376c9a536c330bd9, ASSIGN 2024-12-09T13:35:02,346 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=239, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3c8784dbaa49d85aceb6fab91f2628cd, ASSIGN; state=OFFLINE, location=c48e4b8eb196,39211,1733750968235; forceNewPlan=false, retain=false 2024-12-09T13:35:02,347 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=238, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=72e9122d33bc2fb2376c9a536c330bd9, ASSIGN; state=OFFLINE, location=c48e4b8eb196,36493,1733750968163; forceNewPlan=false, retain=false 2024-12-09T13:35:02,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=237 2024-12-09T13:35:02,497 INFO [c48e4b8eb196:45049 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-09T13:35:02,497 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=239 updating hbase:meta row=3c8784dbaa49d85aceb6fab91f2628cd, regionState=OPENING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:35:02,497 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=238 updating hbase:meta row=72e9122d33bc2fb2376c9a536c330bd9, regionState=OPENING, regionLocation=c48e4b8eb196,36493,1733750968163 2024-12-09T13:35:02,499 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=239, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3c8784dbaa49d85aceb6fab91f2628cd, ASSIGN because future has completed 2024-12-09T13:35:02,499 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=240, ppid=239, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3c8784dbaa49d85aceb6fab91f2628cd, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:35:02,499 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=238, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=72e9122d33bc2fb2376c9a536c330bd9, ASSIGN because future has completed 2024-12-09T13:35:02,500 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=241, ppid=238, state=RUNNABLE, hasLock=false; OpenRegionProcedure 72e9122d33bc2fb2376c9a536c330bd9, server=c48e4b8eb196,36493,1733750968163}] 2024-12-09T13:35:02,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=237 2024-12-09T13:35:02,653 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd. 2024-12-09T13:35:02,653 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(7752): Opening region: {ENCODED => 3c8784dbaa49d85aceb6fab91f2628cd, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T13:35:02,653 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd. service=AccessControlService 2024-12-09T13:35:02,653 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T13:35:02,653 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 3c8784dbaa49d85aceb6fab91f2628cd 2024-12-09T13:35:02,654 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:35:02,654 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(7794): checking encryption for 3c8784dbaa49d85aceb6fab91f2628cd 2024-12-09T13:35:02,654 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(7797): checking classloading for 3c8784dbaa49d85aceb6fab91f2628cd 2024-12-09T13:35:02,654 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9. 2024-12-09T13:35:02,654 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(7752): Opening region: {ENCODED => 72e9122d33bc2fb2376c9a536c330bd9, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T13:35:02,655 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9. service=AccessControlService 2024-12-09T13:35:02,655 INFO [StoreOpener-3c8784dbaa49d85aceb6fab91f2628cd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3c8784dbaa49d85aceb6fab91f2628cd 2024-12-09T13:35:02,655 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-09T13:35:02,655 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 72e9122d33bc2fb2376c9a536c330bd9 2024-12-09T13:35:02,655 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T13:35:02,655 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(7794): checking encryption for 72e9122d33bc2fb2376c9a536c330bd9 2024-12-09T13:35:02,655 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(7797): checking classloading for 72e9122d33bc2fb2376c9a536c330bd9 2024-12-09T13:35:02,656 INFO [StoreOpener-3c8784dbaa49d85aceb6fab91f2628cd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c8784dbaa49d85aceb6fab91f2628cd columnFamilyName cf 2024-12-09T13:35:02,656 DEBUG [StoreOpener-3c8784dbaa49d85aceb6fab91f2628cd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:35:02,657 INFO [StoreOpener-3c8784dbaa49d85aceb6fab91f2628cd-1 {}] regionserver.HStore(327): Store=3c8784dbaa49d85aceb6fab91f2628cd/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:35:02,657 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1038): replaying wal for 3c8784dbaa49d85aceb6fab91f2628cd 2024-12-09T13:35:02,657 INFO [StoreOpener-72e9122d33bc2fb2376c9a536c330bd9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 72e9122d33bc2fb2376c9a536c330bd9 2024-12-09T13:35:02,657 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/3c8784dbaa49d85aceb6fab91f2628cd 2024-12-09T13:35:02,658 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/3c8784dbaa49d85aceb6fab91f2628cd 2024-12-09T13:35:02,658 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1048): stopping wal replay for 3c8784dbaa49d85aceb6fab91f2628cd 2024-12-09T13:35:02,658 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1060): Cleaning up temporary data for 3c8784dbaa49d85aceb6fab91f2628cd 2024-12-09T13:35:02,658 INFO [StoreOpener-72e9122d33bc2fb2376c9a536c330bd9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 72e9122d33bc2fb2376c9a536c330bd9 columnFamilyName cf 2024-12-09T13:35:02,658 DEBUG [StoreOpener-72e9122d33bc2fb2376c9a536c330bd9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T13:35:02,658 INFO [StoreOpener-72e9122d33bc2fb2376c9a536c330bd9-1 {}] regionserver.HStore(327): Store=72e9122d33bc2fb2376c9a536c330bd9/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T13:35:02,659 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1038): replaying wal for 72e9122d33bc2fb2376c9a536c330bd9 2024-12-09T13:35:02,659 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/72e9122d33bc2fb2376c9a536c330bd9 2024-12-09T13:35:02,659 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/72e9122d33bc2fb2376c9a536c330bd9 2024-12-09T13:35:02,660 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1093): writing seq id for 3c8784dbaa49d85aceb6fab91f2628cd 2024-12-09T13:35:02,660 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1048): stopping wal replay for 72e9122d33bc2fb2376c9a536c330bd9 2024-12-09T13:35:02,660 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1060): Cleaning up temporary data for 72e9122d33bc2fb2376c9a536c330bd9 2024-12-09T13:35:02,661 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] 
regionserver.HRegion(1093): writing seq id for 72e9122d33bc2fb2376c9a536c330bd9 2024-12-09T13:35:02,662 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/3c8784dbaa49d85aceb6fab91f2628cd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:35:02,662 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1114): Opened 3c8784dbaa49d85aceb6fab91f2628cd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74189381, jitterRate=0.10550792515277863}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:35:02,662 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3c8784dbaa49d85aceb6fab91f2628cd 2024-12-09T13:35:02,662 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/72e9122d33bc2fb2376c9a536c330bd9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T13:35:02,663 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1006): Region open journal for 3c8784dbaa49d85aceb6fab91f2628cd: Running coprocessor pre-open hook at 1733751302654Writing region info on filesystem at 1733751302654Initializing all the Stores at 1733751302655 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751302655Cleaning up temporary data from old regions at 1733751302658 (+3 ms)Running coprocessor post-open hooks at 1733751302662 (+4 ms)Region opened successfully at 1733751302663 (+1 ms) 2024-12-09T13:35:02,663 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1114): Opened 72e9122d33bc2fb2376c9a536c330bd9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61630322, jitterRate=-0.08163663744926453}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T13:35:02,663 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 72e9122d33bc2fb2376c9a536c330bd9 2024-12-09T13:35:02,663 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1006): Region open journal for 72e9122d33bc2fb2376c9a536c330bd9: Running coprocessor pre-open hook at 1733751302655Writing region info on filesystem at 1733751302655Initializing all the Stores at 1733751302656 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 
'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733751302656Cleaning up temporary data from old regions at 1733751302660 (+4 ms)Running coprocessor post-open hooks at 1733751302663 (+3 ms)Region opened successfully at 1733751302663 2024-12-09T13:35:02,664 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd., pid=240, masterSystemTime=1733751302650 2024-12-09T13:35:02,664 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9., pid=241, masterSystemTime=1733751302652 2024-12-09T13:35:02,665 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9. 2024-12-09T13:35:02,665 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9. 2024-12-09T13:35:02,666 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=238 updating hbase:meta row=72e9122d33bc2fb2376c9a536c330bd9, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,36493,1733750968163 2024-12-09T13:35:02,666 DEBUG [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd. 2024-12-09T13:35:02,666 INFO [RS_OPEN_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd. 
2024-12-09T13:35:02,667 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=239 updating hbase:meta row=3c8784dbaa49d85aceb6fab91f2628cd, regionState=OPEN, openSeqNum=2, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:35:02,667 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=241, ppid=238, state=RUNNABLE, hasLock=false; OpenRegionProcedure 72e9122d33bc2fb2376c9a536c330bd9, server=c48e4b8eb196,36493,1733750968163 because future has completed 2024-12-09T13:35:02,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=240, ppid=239, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3c8784dbaa49d85aceb6fab91f2628cd, server=c48e4b8eb196,39211,1733750968235 because future has completed 2024-12-09T13:35:02,670 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=241, resume processing ppid=238 2024-12-09T13:35:02,670 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=241, ppid=238, state=SUCCESS, hasLock=false; OpenRegionProcedure 72e9122d33bc2fb2376c9a536c330bd9, server=c48e4b8eb196,36493,1733750968163 in 168 msec 2024-12-09T13:35:02,671 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=240, resume processing ppid=239 2024-12-09T13:35:02,671 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=238, ppid=237, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=72e9122d33bc2fb2376c9a536c330bd9, ASSIGN in 330 msec 2024-12-09T13:35:02,671 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=240, ppid=239, state=SUCCESS, hasLock=false; OpenRegionProcedure 3c8784dbaa49d85aceb6fab91f2628cd, server=c48e4b8eb196,39211,1733750968235 in 170 msec 2024-12-09T13:35:02,672 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=239, resume processing ppid=237 2024-12-09T13:35:02,672 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=239, ppid=237, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3c8784dbaa49d85aceb6fab91f2628cd, ASSIGN in 330 msec 2024-12-09T13:35:02,676 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T13:35:02,676 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751302676"}]},"ts":"1733751302676"} 2024-12-09T13:35:02,678 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-12-09T13:35:02,679 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T13:35:02,680 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-12-09T13:35:02,687 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44683 {}] 
access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-09T13:35:02,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:35:02,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:35:02,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:35:02,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:35:02,873 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:35:02,873 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:35:02,873 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-09T13:35:02,873 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-09T13:35:02,873 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:35:02,873 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-09T13:35:02,873 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:35:02,873 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 
2024-12-09T13:35:02,875 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=237, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 595 msec 2024-12-09T13:35:02,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=237 2024-12-09T13:35:02,903 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-09T13:35:02,903 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithSkipTmp get assigned. Timeout = 60000ms 2024-12-09T13:35:02,903 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:35:02,905 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39211 {}] ipc.CallRunner(138): callId: 519 service: ClientService methodName: Scan size: 113 connection: 172.17.0.2:41176 deadline: 1733751362903, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=c48e4b8eb196 port=36493 startCode=1733750968163. As of locationSeqNum=211. 2024-12-09T13:35:02,905 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=c48e4b8eb196 port=36493 startCode=1733750968163. As of locationSeqNum=211. 2024-12-09T13:35:02,905 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=c48e4b8eb196 port=36493 startCode=1733750968163. As of locationSeqNum=211. 2024-12-09T13:35:02,905 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(84): Try updating region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,39211,1733750968235, seqNum=-1 with the new location region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,36493,1733750968163, seqNum=211 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=c48e4b8eb196 port=36493 startCode=1733750968163. As of locationSeqNum=211. 2024-12-09T13:35:03,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned to meta. Checking AM states. 2024-12-09T13:35:03,017 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:35:03,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned. 
2024-12-09T13:35:03,018 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-09T13:35:03,021 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-09T13:35:03,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733751303021 (current time:1733751303021). 2024-12-09T13:35:03,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T13:35:03,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-09T13:35:03,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T13:35:03,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3888b27, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:35:03,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:35:03,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:35:03,023 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:35:03,023 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:35:03,023 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:35:03,023 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19dd44, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:35:03,024 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:35:03,024 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:35:03,024 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:35:03,024 INFO 
[HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44758, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:35:03,025 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a8e4b41, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:35:03,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:35:03,026 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,36493,1733750968163, seqNum=-1] 2024-12-09T13:35:03,027 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:35:03,027 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38758, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:35:03,029 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049. 2024-12-09T13:35:03,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:35:03,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:35:03,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:35:03,030 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T13:35:03,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7eacbde0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:35:03,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:35:03,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:35:03,032 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:35:03,032 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:35:03,032 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:35:03,032 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c371eef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:35:03,032 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:35:03,032 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:35:03,032 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:35:03,033 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44786, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:35:03,033 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23c53c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:35:03,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:35:03,035 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,36493,1733750968163, seqNum=-1] 2024-12-09T13:35:03,035 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:35:03,036 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38764, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-09T13:35:03,038 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,44683,1733750968041, seqNum=32] 2024-12-09T13:35:03,039 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:35:03,040 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39176, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:35:03,041 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049. 2024-12-09T13:35:03,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:35:03,041 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:35:03,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:35:03,042 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:35:03,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-09T13:35:03,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-09T13:35:03,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-09T13:35:03,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-12-09T13:35:03,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-09T13:35:03,045 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T13:35:03,047 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T13:35:03,049 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T13:35:03,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742371_1547 (size=203) 2024-12-09T13:35:03,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742371_1547 (size=203) 2024-12-09T13:35:03,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742371_1547 (size=203) 2024-12-09T13:35:03,063 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T13:35:03,063 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 72e9122d33bc2fb2376c9a536c330bd9}, {pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3c8784dbaa49d85aceb6fab91f2628cd}] 2024-12-09T13:35:03,064 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 72e9122d33bc2fb2376c9a536c330bd9 2024-12-09T13:35:03,064 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3c8784dbaa49d85aceb6fab91f2628cd 2024-12-09T13:35:03,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-09T13:35:03,216 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36493 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=243 2024-12-09T13:35:03,216 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39211 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=244 2024-12-09T13:35:03,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd. 2024-12-09T13:35:03,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9. 2024-12-09T13:35:03,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(2603): Flush status journal for 3c8784dbaa49d85aceb6fab91f2628cd: 2024-12-09T13:35:03,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(2603): Flush status journal for 72e9122d33bc2fb2376c9a536c330bd9: 2024-12-09T13:35:03,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-09T13:35:03,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-09T13:35:03,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:03,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:03,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:35:03,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:35:03,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T13:35:03,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-09T13:35:03,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742372_1548 (size=82) 2024-12-09T13:35:03,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742372_1548 (size=82) 2024-12-09T13:35:03,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742372_1548 (size=82) 2024-12-09T13:35:03,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd. 
2024-12-09T13:35:03,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=244 2024-12-09T13:35:03,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=244 2024-12-09T13:35:03,231 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 3c8784dbaa49d85aceb6fab91f2628cd 2024-12-09T13:35:03,231 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3c8784dbaa49d85aceb6fab91f2628cd 2024-12-09T13:35:03,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742373_1549 (size=82) 2024-12-09T13:35:03,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742373_1549 (size=82) 2024-12-09T13:35:03,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742373_1549 (size=82) 2024-12-09T13:35:03,235 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=244, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3c8784dbaa49d85aceb6fab91f2628cd in 170 msec 2024-12-09T13:35:03,235 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9. 
2024-12-09T13:35:03,235 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=243 2024-12-09T13:35:03,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=243 2024-12-09T13:35:03,236 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 72e9122d33bc2fb2376c9a536c330bd9 2024-12-09T13:35:03,236 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 72e9122d33bc2fb2376c9a536c330bd9 2024-12-09T13:35:03,239 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=243, resume processing ppid=242 2024-12-09T13:35:03,239 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T13:35:03,239 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=243, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 72e9122d33bc2fb2376c9a536c330bd9 in 174 msec 2024-12-09T13:35:03,240 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T13:35:03,240 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T13:35:03,240 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:03,241 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:03,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742374_1550 (size=585) 2024-12-09T13:35:03,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742374_1550 (size=585) 2024-12-09T13:35:03,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742374_1550 (size=585) 2024-12-09T13:35:03,249 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T13:35:03,255 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T13:35:03,255 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:03,257 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T13:35:03,257 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-12-09T13:35:03,259 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=242, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 215 msec 2024-12-09T13:35:03,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-09T13:35:03,363 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-09T13:35:03,369 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='09c7a4f3be9cd7d62f5959341c3cef5fe', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9., hostname=c48e4b8eb196,36493,1733750968163, seqNum=2] 2024-12-09T13:35:03,371 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='1df29708a0dc60a30104bc56477be0ebd', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:35:03,372 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='286e4c0fa739346496bf31476673e42ed', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 
2024-12-09T13:35:03,373 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='3aaac2ebe3312741661b786e1f3ba9b96', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:35:03,373 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='42a2bae07b5677da9a799c25e241ddb71', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:35:03,375 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='53f9e8120e3b6e10d7a6d0368b3a88dec', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd., hostname=c48e4b8eb196,39211,1733750968235, seqNum=2] 2024-12-09T13:35:03,379 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39211 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T13:35:03,380 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36493 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9. with WAL disabled. Data may be lost in the event of a crash. 2024-12-09T13:35:03,381 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-09T13:35:03,384 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:03,384 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9. 
2024-12-09T13:35:03,384 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T13:35:03,387 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-09T13:35:03,394 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-09T13:35:03,402 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-09T13:35:03,406 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-09T13:35:03,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733751303406 (current time:1733751303406). 2024-12-09T13:35:03,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-09T13:35:03,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-09T13:35:03,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-09T13:35:03,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1583c7c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:35:03,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:35:03,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:35:03,407 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:35:03,407 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:35:03,408 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:35:03,408 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@229db828, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:35:03,408 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:35:03,408 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:35:03,408 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:35:03,409 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44808, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:35:03,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d374c7f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:35:03,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:35:03,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,36493,1733750968163, seqNum=-1] 2024-12-09T13:35:03,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:35:03,412 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38766, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:35:03,414 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049. 
2024-12-09T13:35:03,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:35:03,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:35:03,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:35:03,414 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:35:03,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b0532f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:35:03,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ClusterIdFetcher(90): Going to request c48e4b8eb196,45049,-1 for getting cluster id 2024-12-09T13:35:03,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T13:35:03,416 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93ce3e14-a429-4982-9e09-44291b12e9d6' 2024-12-09T13:35:03,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T13:35:03,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93ce3e14-a429-4982-9e09-44291b12e9d6" 2024-12-09T13:35:03,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38327a7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:35:03,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [c48e4b8eb196,45049,-1] 2024-12-09T13:35:03,417 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T13:35:03,417 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:35:03,418 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44822, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T13:35:03,419 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@233578b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T13:35:03,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T13:35:03,420 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c48e4b8eb196,36493,1733750968163, seqNum=-1] 2024-12-09T13:35:03,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:35:03,422 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38782, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:35:03,425 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., hostname=c48e4b8eb196,44683,1733750968041, seqNum=32] 2024-12-09T13:35:03,425 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T13:35:03,426 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39182, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T13:35:03,428 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049. 
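The registry and locator entries above (ClusterIdFetcher requesting the cluster id, ConnectionUtils fetching the meta region location, AsyncNonMetaRegionLocator resolving the hbase:acl region for the test-table row) are the trace of an ordinary client connection being opened and a row being located. A minimal client-side sketch of that flow, using only the public HBase client API; the connection settings are assumed to come from hbase-site.xml on the classpath, and the row key is reused from the log purely for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LocateRegionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // picks up hbase-site.xml
        // Opening the connection is what drives the cluster-id and meta-location
        // fetches seen in the registry entries above.
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.valueOf("hbase:acl"))) {
          // Locate the region that serves a given row, roughly the lookup the
          // AsyncNonMetaRegionLocator entries record on the server side.
          HRegionLocation loc = locator.getRegionLocation(
              Bytes.toBytes("testtb-testExportFileSystemStateWithSkipTmp"));
          System.out.println("row is served by " + loc.getServerName());
        }
      }
    }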
2024-12-09T13:35:03,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor267.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T13:35:03,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:35:03,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:35:03,428 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T13:35:03,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-09T13:35:03,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
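The "No existing snapshot, attempting snapshot..." entry, and the SnapshotProcedure registered right after it (pid=245, type=FLUSH), are the master's side of a client snapshot request. A hedged sketch of the kind of client call that triggers such a procedure, using the public Admin API; this is illustrative only and not taken from the test source:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeSnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // A FLUSH-type snapshot: each region flushes its memstore before the
          // snapshot manifest is written, matching type=FLUSH in the entries above.
          admin.snapshot("snaptb0-testExportFileSystemStateWithSkipTmp",
              TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
              SnapshotType.FLUSH);
        }
      }
    }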
2024-12-09T13:35:03,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=245, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-09T13:35:03,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 245 2024-12-09T13:35:03,431 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-09T13:35:03,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-09T13:35:03,432 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-09T13:35:03,434 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-09T13:35:03,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742375_1551 (size=198) 2024-12-09T13:35:03,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742375_1551 (size=198) 2024-12-09T13:35:03,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742375_1551 (size=198) 2024-12-09T13:35:03,455 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-09T13:35:03,456 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=246, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 72e9122d33bc2fb2376c9a536c330bd9}, {pid=247, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3c8784dbaa49d85aceb6fab91f2628cd}] 2024-12-09T13:35:03,457 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=247, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3c8784dbaa49d85aceb6fab91f2628cd 2024-12-09T13:35:03,457 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=246, ppid=245, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 72e9122d33bc2fb2376c9a536c330bd9 2024-12-09T13:35:03,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-09T13:35:03,609 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36493 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=246 2024-12-09T13:35:03,609 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39211 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=247 2024-12-09T13:35:03,609 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd. 2024-12-09T13:35:03,609 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9. 2024-12-09T13:35:03,609 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HRegion(2902): Flushing 72e9122d33bc2fb2376c9a536c330bd9 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-09T13:35:03,609 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HRegion(2902): Flushing 3c8784dbaa49d85aceb6fab91f2628cd 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-09T13:35:03,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/72e9122d33bc2fb2376c9a536c330bd9/.tmp/cf/7c7aa9ecd9d94f2f944b7963563d4626 is 71, key is 00fd2cbacd944366305968b617323c23/cf:q/1733751303380/Put/seqid=0 2024-12-09T13:35:03,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742376_1552 (size=5356) 2024-12-09T13:35:03,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742376_1552 (size=5356) 2024-12-09T13:35:03,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742376_1552 (size=5356) 2024-12-09T13:35:03,642 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/72e9122d33bc2fb2376c9a536c330bd9/.tmp/cf/7c7aa9ecd9d94f2f944b7963563d4626 2024-12-09T13:35:03,644 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/3c8784dbaa49d85aceb6fab91f2628cd/.tmp/cf/76fefb79d416441b9680cd1dd2eb7124 is 71, key is 10f38e88f9818b3703277443b22ee4f2/cf:q/1733751303379/Put/seqid=0 2024-12-09T13:35:03,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/72e9122d33bc2fb2376c9a536c330bd9/.tmp/cf/7c7aa9ecd9d94f2f944b7963563d4626 as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/72e9122d33bc2fb2376c9a536c330bd9/cf/7c7aa9ecd9d94f2f944b7963563d4626 2024-12-09T13:35:03,657 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/72e9122d33bc2fb2376c9a536c330bd9/cf/7c7aa9ecd9d94f2f944b7963563d4626, entries=4, sequenceid=6, filesize=5.2 K 2024-12-09T13:35:03,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742377_1553 (size=8256) 2024-12-09T13:35:03,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742377_1553 (size=8256) 2024-12-09T13:35:03,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742377_1553 (size=8256) 2024-12-09T13:35:03,662 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 72e9122d33bc2fb2376c9a536c330bd9 in 53ms, sequenceid=6, compaction requested=false 2024-12-09T13:35:03,662 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-12-09T13:35:03,663 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HRegion(2603): Flush status journal for 72e9122d33bc2fb2376c9a536c330bd9: 2024-12-09T13:35:03,663 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-09T13:35:03,663 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:03,663 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:35:03,663 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/72e9122d33bc2fb2376c9a536c330bd9/cf/7c7aa9ecd9d94f2f944b7963563d4626] hfiles 2024-12-09T13:35:03,663 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/72e9122d33bc2fb2376c9a536c330bd9/cf/7c7aa9ecd9d94f2f944b7963563d4626 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:03,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742378_1554 (size=121) 2024-12-09T13:35:03,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742378_1554 (size=121) 2024-12-09T13:35:03,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742378_1554 (size=121) 2024-12-09T13:35:03,669 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9. 
2024-12-09T13:35:03,669 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=246 2024-12-09T13:35:03,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=246 2024-12-09T13:35:03,670 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 72e9122d33bc2fb2376c9a536c330bd9 2024-12-09T13:35:03,670 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=246, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 72e9122d33bc2fb2376c9a536c330bd9 2024-12-09T13:35:03,672 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=246, ppid=245, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 72e9122d33bc2fb2376c9a536c330bd9 in 216 msec 2024-12-09T13:35:03,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-09T13:35:04,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-09T13:35:04,062 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/3c8784dbaa49d85aceb6fab91f2628cd/.tmp/cf/76fefb79d416441b9680cd1dd2eb7124 2024-12-09T13:35:04,068 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/3c8784dbaa49d85aceb6fab91f2628cd/.tmp/cf/76fefb79d416441b9680cd1dd2eb7124 as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/3c8784dbaa49d85aceb6fab91f2628cd/cf/76fefb79d416441b9680cd1dd2eb7124 2024-12-09T13:35:04,072 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/3c8784dbaa49d85aceb6fab91f2628cd/cf/76fefb79d416441b9680cd1dd2eb7124, entries=46, sequenceid=6, filesize=8.1 K 2024-12-09T13:35:04,073 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 3c8784dbaa49d85aceb6fab91f2628cd in 464ms, sequenceid=6, compaction requested=false 2024-12-09T13:35:04,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HRegion(2603): Flush status journal for 3c8784dbaa49d85aceb6fab91f2628cd: 2024-12-09T13:35:04,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.SnapshotRegionCallable(75): Snapshotting 
region testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-09T13:35:04,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:04,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-09T13:35:04,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/3c8784dbaa49d85aceb6fab91f2628cd/cf/76fefb79d416441b9680cd1dd2eb7124] hfiles 2024-12-09T13:35:04,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/3c8784dbaa49d85aceb6fab91f2628cd/cf/76fefb79d416441b9680cd1dd2eb7124 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:04,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742379_1555 (size=121) 2024-12-09T13:35:04,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742379_1555 (size=121) 2024-12-09T13:35:04,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742379_1555 (size=121) 2024-12-09T13:35:04,084 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd. 
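Both region sub-procedures flush, commit their hfiles, and add manifest references; the parent procedure then consolidates and completes, as the following entries show. The repeated "Checking to see if procedure is done pid=245" lines are the blocking client call polling the master for completion. A caller that prefers to poll explicitly could do something along these lines; this sketch assumes the snapshotAsync and isSnapshotFinished methods of the Admin interface present in recent HBase client versions:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class PollSnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          SnapshotDescription desc = new SnapshotDescription(
              "snaptb0-testExportFileSystemStateWithSkipTmp",
              TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
              SnapshotType.FLUSH);
          admin.snapshotAsync(desc);                 // submit the snapshot procedure
          while (!admin.isSnapshotFinished(desc)) {  // poll, like the repeated isProcedureDone checks above
            Thread.sleep(500);
          }
        }
      }
    }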
2024-12-09T13:35:04,085 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/c48e4b8eb196:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=247 2024-12-09T13:35:04,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster(4169): Remote procedure done, pid=247 2024-12-09T13:35:04,085 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 3c8784dbaa49d85aceb6fab91f2628cd 2024-12-09T13:35:04,085 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=247, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3c8784dbaa49d85aceb6fab91f2628cd 2024-12-09T13:35:04,087 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=247, resume processing ppid=245 2024-12-09T13:35:04,087 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=247, ppid=245, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3c8784dbaa49d85aceb6fab91f2628cd in 631 msec 2024-12-09T13:35:04,087 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-09T13:35:04,088 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-09T13:35:04,088 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-09T13:35:04,088 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:04,089 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:04,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742380_1556 (size=663) 2024-12-09T13:35:04,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742380_1556 (size=663) 2024-12-09T13:35:04,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742380_1556 (size=663) 2024-12-09T13:35:04,106 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp 
table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-09T13:35:04,114 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-09T13:35:04,114 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:04,116 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-09T13:35:04,116 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 245 2024-12-09T13:35:04,117 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=245, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 687 msec 2024-12-09T13:35:04,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-09T13:35:04,563 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-09T13:35:04,563 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751304563 2024-12-09T13:35:04,563 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:45819, tgtDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751304563, rawTgtDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751304563, srcFsUri=hdfs://localhost:45819, srcDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:35:04,589 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:45819, inputRoot=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7 2024-12-09T13:35:04,589 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751304563, skipTmp=true, 
initialOutputSnapshotDir=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751304563/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:04,591 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-09T13:35:04,595 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751304563/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:04,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742381_1557 (size=198) 2024-12-09T13:35:04,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742381_1557 (size=198) 2024-12-09T13:35:04,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742382_1558 (size=663) 2024-12-09T13:35:04,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742381_1557 (size=198) 2024-12-09T13:35:04,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742382_1558 (size=663) 2024-12-09T13:35:04,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742382_1558 (size=663) 2024-12-09T13:35:04,635 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:35:04,635 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:35:04,636 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:35:05,607 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop-15487062319807661915.jar 2024-12-09T13:35:05,607 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:35:05,607 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:35:05,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop-8754759155412030760.jar 2024-12-09T13:35:05,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:35:05,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:35:05,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:35:05,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:35:05,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:35:05,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-09T13:35:05,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-09T13:35:05,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-09T13:35:05,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-09T13:35:05,663 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-09T13:35:05,663 DEBUG [Time-limited 
test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-09T13:35:05,663 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-09T13:35:05,663 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-09T13:35:05,663 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-09T13:35:05,663 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-09T13:35:05,663 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-09T13:35:05,664 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-09T13:35:05,664 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:35:05,664 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:35:05,664 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:35:05,664 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:35:05,664 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-09T13:35:05,664 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:35:05,665 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-09T13:35:05,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742383_1559 (size=5175431) 2024-12-09T13:35:05,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742383_1559 (size=5175431) 2024-12-09T13:35:05,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742383_1559 (size=5175431) 2024-12-09T13:35:05,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742384_1560 (size=1877034) 2024-12-09T13:35:05,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742384_1560 (size=1877034) 2024-12-09T13:35:05,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742384_1560 (size=1877034) 2024-12-09T13:35:05,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742385_1561 (size=20406) 2024-12-09T13:35:05,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742385_1561 (size=20406) 2024-12-09T13:35:05,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742385_1561 (size=20406) 2024-12-09T13:35:05,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742386_1562 (size=24096) 2024-12-09T13:35:05,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742386_1562 (size=24096) 2024-12-09T13:35:05,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742386_1562 (size=24096) 2024-12-09T13:35:05,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742387_1563 (size=503880) 2024-12-09T13:35:05,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742387_1563 (size=503880) 2024-12-09T13:35:05,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742387_1563 (size=503880) 2024-12-09T13:35:05,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742388_1564 (size=4695811) 2024-12-09T13:35:05,768 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742388_1564 (size=4695811) 2024-12-09T13:35:05,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742388_1564 (size=4695811) 2024-12-09T13:35:05,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742389_1565 (size=111872) 2024-12-09T13:35:05,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742389_1565 (size=111872) 2024-12-09T13:35:05,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742389_1565 (size=111872) 2024-12-09T13:35:05,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742390_1566 (size=1597213) 2024-12-09T13:35:05,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742390_1566 (size=1597213) 2024-12-09T13:35:05,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742390_1566 (size=1597213) 2024-12-09T13:35:05,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742391_1567 (size=4188619) 2024-12-09T13:35:05,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742391_1567 (size=4188619) 2024-12-09T13:35:05,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742391_1567 (size=4188619) 2024-12-09T13:35:05,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742392_1568 (size=127628) 2024-12-09T13:35:05,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742392_1568 (size=127628) 2024-12-09T13:35:05,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742392_1568 (size=127628) 2024-12-09T13:35:05,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742393_1569 (size=29229) 2024-12-09T13:35:05,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742393_1569 (size=29229) 2024-12-09T13:35:05,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742393_1569 (size=29229) 2024-12-09T13:35:05,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742394_1570 (size=443172) 2024-12-09T13:35:05,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742394_1570 (size=443172) 2024-12-09T13:35:05,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742394_1570 (size=443172) 2024-12-09T13:35:05,820 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742395_1571 (size=45609) 2024-12-09T13:35:05,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742395_1571 (size=45609) 2024-12-09T13:35:05,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742395_1571 (size=45609) 2024-12-09T13:35:05,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742396_1572 (size=8360360) 2024-12-09T13:35:05,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742396_1572 (size=8360360) 2024-12-09T13:35:05,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742396_1572 (size=8360360) 2024-12-09T13:35:05,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742397_1573 (size=1323991) 2024-12-09T13:35:05,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742397_1573 (size=1323991) 2024-12-09T13:35:05,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742397_1573 (size=1323991) 2024-12-09T13:35:05,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742398_1574 (size=232957) 2024-12-09T13:35:05,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742398_1574 (size=232957) 2024-12-09T13:35:05,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742398_1574 (size=232957) 2024-12-09T13:35:05,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742399_1575 (size=217634) 2024-12-09T13:35:05,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742399_1575 (size=217634) 2024-12-09T13:35:05,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742399_1575 (size=217634) 2024-12-09T13:35:05,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742400_1576 (size=903930) 2024-12-09T13:35:05,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742400_1576 (size=903930) 2024-12-09T13:35:05,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742400_1576 (size=903930) 2024-12-09T13:35:05,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742401_1577 (size=77835) 2024-12-09T13:35:05,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742401_1577 (size=77835) 2024-12-09T13:35:05,875 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742401_1577 (size=77835) 2024-12-09T13:35:05,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742402_1578 (size=1832290) 2024-12-09T13:35:05,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742402_1578 (size=1832290) 2024-12-09T13:35:05,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742402_1578 (size=1832290) 2024-12-09T13:35:05,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742403_1579 (size=6425017) 2024-12-09T13:35:05,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742403_1579 (size=6425017) 2024-12-09T13:35:05,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742403_1579 (size=6425017) 2024-12-09T13:35:05,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742404_1580 (size=30949) 2024-12-09T13:35:05,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742404_1580 (size=30949) 2024-12-09T13:35:05,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742404_1580 (size=30949) 2024-12-09T13:35:05,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742405_1581 (size=136454) 2024-12-09T13:35:05,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742405_1581 (size=136454) 2024-12-09T13:35:05,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742405_1581 (size=136454) 2024-12-09T13:35:05,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742406_1582 (size=322274) 2024-12-09T13:35:05,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742406_1582 (size=322274) 2024-12-09T13:35:05,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742406_1582 (size=322274) 2024-12-09T13:35:05,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742407_1583 (size=131440) 2024-12-09T13:35:05,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742407_1583 (size=131440) 2024-12-09T13:35:05,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742407_1583 (size=131440) 2024-12-09T13:35:05,925 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
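With the dependency jars staged and the job resources uploaded, the ExportSnapshot job loads the hfile list and creates its splits (next entries). The export being run here, with skipTmp=true as noted earlier, corresponds roughly to invoking the ExportSnapshot tool as sketched below; the destination path and mapper count are placeholders, and snapshot.export.skip.tmp is assumed to be the configuration key behind the skipTmp flag:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key for the skipTmp=true behaviour seen in the log: write the
        // snapshot directly into .hbase-snapshot/ at the destination, no .tmp dir.
        conf.setBoolean("snapshot.export.skip.tmp", true);
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
            "-copy-to", "hdfs://namenode:8020/export-test/dest",  // placeholder destination
            "-mappers", "2"                                       // two export splits, as in the log
        });
        System.exit(rc);
      }
    }

The usual command-line form of the same operation is hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <hdfs-uri>.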
2024-12-09T13:35:05,927 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-12-09T13:35:05,929 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-09T13:35:05,929 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-09T13:35:05,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742408_1584 (size=469) 2024-12-09T13:35:05,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742408_1584 (size=469) 2024-12-09T13:35:05,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742408_1584 (size=469) 2024-12-09T13:35:05,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742409_1585 (size=21) 2024-12-09T13:35:05,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742409_1585 (size=21) 2024-12-09T13:35:05,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742409_1585 (size=21) 2024-12-09T13:35:05,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742410_1586 (size=304274) 2024-12-09T13:35:05,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742410_1586 (size=304274) 2024-12-09T13:35:05,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742410_1586 (size=304274) 2024-12-09T13:35:06,064 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-09T13:35:06,064 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-09T13:35:06,066 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0010_000001 (auth:SIMPLE) from 127.0.0.1:41896 2024-12-09T13:35:06,094 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_1/usercache/jenkins/appcache/application_1733750974572_0010/container_1733750974572_0010_01_000001/launch_container.sh] 2024-12-09T13:35:06,094 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_1/usercache/jenkins/appcache/application_1733750974572_0010/container_1733750974572_0010_01_000001/container_tokens] 2024-12-09T13:35:06,094 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_1/usercache/jenkins/appcache/application_1733750974572_0010/container_1733750974572_0010_01_000001/sysfs] 2024-12-09T13:35:07,035 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0011_000001 (auth:SIMPLE) from 127.0.0.1:43222 2024-12-09T13:35:07,236 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T13:35:07,484 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:07,484 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-12-09T13:35:07,485 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-09T13:35:07,733 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region f0043ab8119e670f2bd7cf64ad44aea8, had cached 0 bytes from a total of 8188 2024-12-09T13:35:07,733 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region b965e8c57383b84f23fb09c5323ac145, had cached 0 bytes from a total of 5422 2024-12-09T13:35:12,997 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T13:35:15,899 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0011_000001 (auth:SIMPLE) from 127.0.0.1:45822 2024-12-09T13:35:16,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742411_1587 (size=349996) 
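
For context on the "Loading Snapshot ... hfile list" and "export split" entries above: they are emitted by org.apache.hadoop.hbase.snapshot.ExportSnapshot while it plans the MapReduce copy of snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp'. Outside this test the same tool is usually driven through ToolRunner or the hbase command line; a minimal sketch, with a hypothetical destination URI and an illustrative mapper count (neither taken from this run):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

// Illustrative only: driving the ExportSnapshot tool programmatically.
public class ExportSnapshotRunner {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
        // Hypothetical backup cluster; the test exports to a local mini-DFS path instead.
        "-copy-to", "hdfs://backup-cluster:8020/hbase",
        "-mappers", "2"
    });
    System.exit(rc);
  }
}
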
2024-12-09T13:35:16,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742411_1587 (size=349996) 2024-12-09T13:35:16,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742411_1587 (size=349996) 2024-12-09T13:35:17,350 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 42720 2024-12-09T13:35:18,121 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0011_000001 (auth:SIMPLE) from 127.0.0.1:34024 2024-12-09T13:35:18,121 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0011_000001 (auth:SIMPLE) from 127.0.0.1:48426 2024-12-09T13:35:18,180 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 96d9654456b57311f15f8834c0cf2325, had cached 0 bytes from a total of 5791 2024-12-09T13:35:21,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742412_1588 (size=8256) 2024-12-09T13:35:21,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742412_1588 (size=8256) 2024-12-09T13:35:21,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742412_1588 (size=8256) 2024-12-09T13:35:21,310 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_2/usercache/jenkins/appcache/application_1733750974572_0011/container_1733750974572_0011_01_000002/launch_container.sh] 2024-12-09T13:35:21,310 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_2/usercache/jenkins/appcache/application_1733750974572_0011/container_1733750974572_0011_01_000002/container_tokens] 2024-12-09T13:35:21,310 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_2/usercache/jenkins/appcache/application_1733750974572_0011/container_1733750974572_0011_01_000002/sysfs] 2024-12-09T13:35:22,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742414_1590 (size=5356) 2024-12-09T13:35:22,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742414_1590 (size=5356) 2024-12-09T13:35:22,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742414_1590 (size=5356) 2024-12-09T13:35:22,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39425 is added to blk_1073742413_1589 (size=22220) 2024-12-09T13:35:22,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742413_1589 (size=22220) 2024-12-09T13:35:22,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742413_1589 (size=22220) 2024-12-09T13:35:22,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742415_1591 (size=476) 2024-12-09T13:35:22,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742415_1591 (size=476) 2024-12-09T13:35:22,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742415_1591 (size=476) 2024-12-09T13:35:22,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742416_1592 (size=22220) 2024-12-09T13:35:22,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742416_1592 (size=22220) 2024-12-09T13:35:22,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742416_1592 (size=22220) 2024-12-09T13:35:22,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742417_1593 (size=349996) 2024-12-09T13:35:22,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742417_1593 (size=349996) 2024-12-09T13:35:22,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742417_1593 (size=349996) 2024-12-09T13:35:22,218 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0011_000001 (auth:SIMPLE) from 127.0.0.1:36952 2024-12-09T13:35:22,227 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_1/usercache/jenkins/appcache/application_1733750974572_0011/container_1733750974572_0011_01_000003/launch_container.sh] 2024-12-09T13:35:22,227 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_1/usercache/jenkins/appcache/application_1733750974572_0011/container_1733750974572_0011_01_000003/container_tokens] 2024-12-09T13:35:22,227 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-1_1/usercache/jenkins/appcache/application_1733750974572_0011/container_1733750974572_0011_01_000003/sysfs] 2024-12-09T13:35:24,125 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the 
Snapshot Export 2024-12-09T13:35:24,125 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-09T13:35:24,130 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,130 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-09T13:35:24,131 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-09T13:35:24,131 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,131 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-09T13:35:24,131 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-09T13:35:24,131 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1457384622_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751304563/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751304563/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,131 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751304563/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-09T13:35:24,131 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/export-test/export-1733751304563/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-09T13:35:24,136 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=248, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248 2024-12-09T13:35:24,138 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751324138"}]},"ts":"1733751324138"} 2024-12-09T13:35:24,140 INFO 
[PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-12-09T13:35:24,140 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-12-09T13:35:24,141 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=249, ppid=248, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-12-09T13:35:24,142 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=250, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=72e9122d33bc2fb2376c9a536c330bd9, UNASSIGN}, {pid=251, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3c8784dbaa49d85aceb6fab91f2628cd, UNASSIGN}] 2024-12-09T13:35:24,143 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=251, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3c8784dbaa49d85aceb6fab91f2628cd, UNASSIGN 2024-12-09T13:35:24,143 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=250, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=72e9122d33bc2fb2376c9a536c330bd9, UNASSIGN 2024-12-09T13:35:24,143 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=250 updating hbase:meta row=72e9122d33bc2fb2376c9a536c330bd9, regionState=CLOSING, regionLocation=c48e4b8eb196,36493,1733750968163 2024-12-09T13:35:24,143 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=251 updating hbase:meta row=3c8784dbaa49d85aceb6fab91f2628cd, regionState=CLOSING, regionLocation=c48e4b8eb196,39211,1733750968235 2024-12-09T13:35:24,145 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=251, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3c8784dbaa49d85aceb6fab91f2628cd, UNASSIGN because future has completed 2024-12-09T13:35:24,145 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:35:24,145 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=252, ppid=251, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3c8784dbaa49d85aceb6fab91f2628cd, server=c48e4b8eb196,39211,1733750968235}] 2024-12-09T13:35:24,145 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=250, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=72e9122d33bc2fb2376c9a536c330bd9, UNASSIGN because future has completed 2024-12-09T13:35:24,146 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T13:35:24,146 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=253, ppid=250, state=RUNNABLE, hasLock=false; CloseRegionProcedure 72e9122d33bc2fb2376c9a536c330bd9, server=c48e4b8eb196,36493,1733750968163}] 2024-12-09T13:35:24,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248 2024-12-09T13:35:24,298 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] handler.UnassignRegionHandler(122): Close 3c8784dbaa49d85aceb6fab91f2628cd 2024-12-09T13:35:24,299 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:35:24,299 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1722): Closing 3c8784dbaa49d85aceb6fab91f2628cd, disabling compactions & flushes 2024-12-09T13:35:24,299 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd. 2024-12-09T13:35:24,299 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd. 2024-12-09T13:35:24,299 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd. after waiting 0 ms 2024-12-09T13:35:24,300 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd. 2024-12-09T13:35:24,300 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] handler.UnassignRegionHandler(122): Close 72e9122d33bc2fb2376c9a536c330bd9 2024-12-09T13:35:24,300 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T13:35:24,300 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1722): Closing 72e9122d33bc2fb2376c9a536c330bd9, disabling compactions & flushes 2024-12-09T13:35:24,300 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9. 2024-12-09T13:35:24,301 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9. 2024-12-09T13:35:24,301 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9. 
after waiting 0 ms 2024-12-09T13:35:24,301 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9. 2024-12-09T13:35:24,308 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/72e9122d33bc2fb2376c9a536c330bd9/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T13:35:24,308 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/3c8784dbaa49d85aceb6fab91f2628cd/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T13:35:24,308 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:35:24,308 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:35:24,308 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd. 2024-12-09T13:35:24,308 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9. 
2024-12-09T13:35:24,308 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1676): Region close journal for 72e9122d33bc2fb2376c9a536c330bd9: Waiting for close lock at 1733751324300Running coprocessor pre-close hooks at 1733751324300Disabling compacts and flushes for region at 1733751324300Disabling writes for close at 1733751324301 (+1 ms)Writing region close event to WAL at 1733751324304 (+3 ms)Running coprocessor post-close hooks at 1733751324308 (+4 ms)Closed at 1733751324308 2024-12-09T13:35:24,308 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1676): Region close journal for 3c8784dbaa49d85aceb6fab91f2628cd: Waiting for close lock at 1733751324299Running coprocessor pre-close hooks at 1733751324299Disabling compacts and flushes for region at 1733751324299Disabling writes for close at 1733751324300 (+1 ms)Writing region close event to WAL at 1733751324302 (+2 ms)Running coprocessor post-close hooks at 1733751324308 (+6 ms)Closed at 1733751324308 2024-12-09T13:35:24,310 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] handler.UnassignRegionHandler(157): Closed 72e9122d33bc2fb2376c9a536c330bd9 2024-12-09T13:35:24,311 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=250 updating hbase:meta row=72e9122d33bc2fb2376c9a536c330bd9, regionState=CLOSED 2024-12-09T13:35:24,311 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] handler.UnassignRegionHandler(157): Closed 3c8784dbaa49d85aceb6fab91f2628cd 2024-12-09T13:35:24,311 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=251 updating hbase:meta row=3c8784dbaa49d85aceb6fab91f2628cd, regionState=CLOSED 2024-12-09T13:35:24,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=253, ppid=250, state=RUNNABLE, hasLock=false; CloseRegionProcedure 72e9122d33bc2fb2376c9a536c330bd9, server=c48e4b8eb196,36493,1733750968163 because future has completed 2024-12-09T13:35:24,313 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=252, ppid=251, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3c8784dbaa49d85aceb6fab91f2628cd, server=c48e4b8eb196,39211,1733750968235 because future has completed 2024-12-09T13:35:24,315 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=253, resume processing ppid=250 2024-12-09T13:35:24,315 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=253, ppid=250, state=SUCCESS, hasLock=false; CloseRegionProcedure 72e9122d33bc2fb2376c9a536c330bd9, server=c48e4b8eb196,36493,1733750968163 in 167 msec 2024-12-09T13:35:24,315 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=252, resume processing ppid=251 2024-12-09T13:35:24,316 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=252, ppid=251, state=SUCCESS, hasLock=false; CloseRegionProcedure 3c8784dbaa49d85aceb6fab91f2628cd, server=c48e4b8eb196,39211,1733750968235 in 169 msec 2024-12-09T13:35:24,316 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=250, ppid=249, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=72e9122d33bc2fb2376c9a536c330bd9, UNASSIGN in 173 msec 2024-12-09T13:35:24,317 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=251, resume processing ppid=249 2024-12-09T13:35:24,317 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=251, ppid=249, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3c8784dbaa49d85aceb6fab91f2628cd, UNASSIGN in 173 msec 2024-12-09T13:35:24,319 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=249, resume processing ppid=248 2024-12-09T13:35:24,319 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=249, ppid=248, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 177 msec 2024-12-09T13:35:24,320 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733751324320"}]},"ts":"1733751324320"} 2024-12-09T13:35:24,322 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-12-09T13:35:24,322 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-12-09T13:35:24,324 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=248, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 186 msec 2024-12-09T13:35:24,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248 2024-12-09T13:35:24,455 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-09T13:35:24,456 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] procedure2.ProcedureExecutor(1139): Stored pid=254, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,460 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=254, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,462 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=254, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,465 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44683 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,467 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/72e9122d33bc2fb2376c9a536c330bd9 2024-12-09T13:35:24,467 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/3c8784dbaa49d85aceb6fab91f2628cd 2024-12-09T13:35:24,468 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/72e9122d33bc2fb2376c9a536c330bd9/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/72e9122d33bc2fb2376c9a536c330bd9/recovered.edits] 2024-12-09T13:35:24,468 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/3c8784dbaa49d85aceb6fab91f2628cd/cf, FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/3c8784dbaa49d85aceb6fab91f2628cd/recovered.edits] 2024-12-09T13:35:24,472 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/3c8784dbaa49d85aceb6fab91f2628cd/cf/76fefb79d416441b9680cd1dd2eb7124 to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/3c8784dbaa49d85aceb6fab91f2628cd/cf/76fefb79d416441b9680cd1dd2eb7124 2024-12-09T13:35:24,472 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/72e9122d33bc2fb2376c9a536c330bd9/cf/7c7aa9ecd9d94f2f944b7963563d4626 to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/72e9122d33bc2fb2376c9a536c330bd9/cf/7c7aa9ecd9d94f2f944b7963563d4626 2024-12-09T13:35:24,474 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/72e9122d33bc2fb2376c9a536c330bd9/recovered.edits/9.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/72e9122d33bc2fb2376c9a536c330bd9/recovered.edits/9.seqid 2024-12-09T13:35:24,474 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/3c8784dbaa49d85aceb6fab91f2628cd/recovered.edits/9.seqid to hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/3c8784dbaa49d85aceb6fab91f2628cd/recovered.edits/9.seqid 2024-12-09T13:35:24,474 DEBUG [HFileArchiver-26 {}] 
backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/72e9122d33bc2fb2376c9a536c330bd9 2024-12-09T13:35:24,474 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testtb-testExportFileSystemStateWithSkipTmp/3c8784dbaa49d85aceb6fab91f2628cd 2024-12-09T13:35:24,474 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-12-09T13:35:24,476 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=254, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,478 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-12-09T13:35:24,480 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-12-09T13:35:24,481 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=254, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,481 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 2024-12-09T13:35:24,481 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751324481"}]},"ts":"9223372036854775807"} 2024-12-09T13:35:24,482 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733751324481"}]},"ts":"9223372036854775807"} 2024-12-09T13:35:24,483 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-09T13:35:24,483 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 72e9122d33bc2fb2376c9a536c330bd9, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733751302276.72e9122d33bc2fb2376c9a536c330bd9.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 3c8784dbaa49d85aceb6fab91f2628cd, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd.', STARTKEY => '1', ENDKEY => ''}] 2024-12-09T13:35:24,483 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 
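
The pid=248..254 procedures above are the server-side counterpart of the test's cleanup, which disables and deletes the table through the admin client (the log shows RawAsyncHBaseAdmin) and then deletes its snapshots. A rough sketch of the same cleanup using the synchronous Admin API, with the table and snapshot names taken from this log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of the disable/delete/snapshot-cleanup sequence seen in the log.
public class SnapshotTestCleanup {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.disableTable(table);   // corresponds to DisableTableProcedure (pid=248)
      admin.deleteTable(table);    // corresponds to DeleteTableProcedure (pid=254)
      // Corresponds to the "delete name: ... type: DISABLED" snapshot requests that follow.
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithSkipTmp");
    }
  }
}
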
2024-12-09T13:35:24,484 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733751324483"}]},"ts":"9223372036854775807"} 2024-12-09T13:35:24,485 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-12-09T13:35:24,486 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=254, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,487 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=254, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 30 msec 2024-12-09T13:35:24,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,526 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-09T13:35:24,526 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-09T13:35:24,526 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-09T13:35:24,526 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-09T13:35:24,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:35:24,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:35:24,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:35:24,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-09T13:35:24,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=254 2024-12-09T13:35:24,534 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,534 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:35:24,534 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:35:24,534 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:35:24,534 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-09T13:35:24,534 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-09T13:35:24,538 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-09T13:35:24,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(381): Deleting snapshot: 
emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,541 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-09T13:35:24,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:24,564 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=833 (was 831) Potentially hanging thread: Thread-8951 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:49984 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:42588 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1457384622_22 at /127.0.0.1:47908 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (294847031) connection to localhost/127.0.0.1:36299 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 161060) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) 
java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-454133608_1 at /127.0.0.1:47876 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-26 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending 
Thread for localhost/127.0.0.1:36299 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-27 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=842 (was 853), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=710 (was 648) - SystemLoadAverage LEAK? -, ProcessCount=20 (was 23), AvailableMemoryMB=1179 (was 898) - AvailableMemoryMB LEAK? - 2024-12-09T13:35:24,564 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=833 is superior to 500 2024-12-09T13:35:24,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 2024-12-09T13:35:24,571 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@57a5f2bc{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-09T13:35:24,573 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4edc73e1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T13:35:24,573 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T13:35:24,573 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30e4067{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-09T13:35:24,573 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@568cbbcc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop.log.dir/,STOPPED} 2024-12-09T13:35:26,023 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-09T13:35:27,484 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-09T13:35:28,286 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733750974572_0011_000001 (auth:SIMPLE) from 127.0.0.1:40012 2024-12-09T13:35:28,295 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_3/usercache/jenkins/appcache/application_1733750974572_0011/container_1733750974572_0011_01_000001/launch_container.sh] 2024-12-09T13:35:28,295 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_3/usercache/jenkins/appcache/application_1733750974572_0011/container_1733750974572_0011_01_000001/container_tokens] 2024-12-09T13:35:28,295 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test/data/MiniMRCluster_1592312836/yarn-181541343/MiniMRCluster_1592312836-localDir-nm-0_3/usercache/jenkins/appcache/application_1733750974572_0011/container_1733750974572_0011_01_000001/sysfs] 2024-12-09T13:35:29,717 WARN [regionserver/c48e4b8eb196:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 4, running: 1 2024-12-09T13:35:29,829 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T13:35:41,594 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5e958680{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-09T13:35:41,595 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@26728a0b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T13:35:41,595 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T13:35:41,595 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18e8f4ed{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-09T13:35:41,595 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5011963e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop.log.dir/,STOPPED} 2024-12-09T13:35:52,733 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region f0043ab8119e670f2bd7cf64ad44aea8, had cached 0 bytes from a total of 8188 2024-12-09T13:35:52,733 DEBUG [HBase-Metrics2-1 {}] 
regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region b965e8c57383b84f23fb09c5323ac145, had cached 0 bytes from a total of 5422 2024-12-09T13:35:56,023 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T13:35:58,601 ERROR [Thread[Thread-402,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-09T13:35:58,601 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7f35e2e0{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-09T13:35:58,602 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@61b551a1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T13:35:58,602 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T13:35:58,602 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@96821f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-09T13:35:58,602 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@544691f2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop.log.dir/,STOPPED} 2024-12-09T13:35:58,604 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 
2024-12-09T13:35:58,610 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-12-09T13:35:58,610 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-12-09T13:35:58,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741830_1006 (size=1177971) 2024-12-09T13:35:58,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741830_1006 (size=1177971) 2024-12-09T13:35:58,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741830_1006 (size=1177971) 2024-12-09T13:35:58,613 ERROR [Thread[Thread-425,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-09T13:35:58,616 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5b668d97{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-09T13:35:58,617 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1772f5a9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T13:35:58,617 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T13:35:58,618 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@34002cd5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-09T13:35:58,618 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ce493c7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop.log.dir/,STOPPED} 2024-12-09T13:35:58,619 ERROR [Thread[Thread-384,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-09T13:35:58,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-12-09T13:35:58,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T13:35:58,619 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T13:35:58,619 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T13:35:58,619 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:35:58,619 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:35:58,619 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T13:35:58,619 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T13:35:58,619 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=471220611, stopped=false 2024-12-09T13:35:58,620 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:35:58,620 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-09T13:35:58,620 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c48e4b8eb196,45049,1733750967073 2024-12-09T13:35:58,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T13:35:58,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T13:35:58,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T13:35:58,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T13:35:58,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:35:58,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:35:58,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:35:58,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:35:58,665 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T13:35:58,665 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T13:35:58,665 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T13:35:58,665 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T13:35:58,665 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T13:35:58,665 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T13:35:58,665 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:35:58,665 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T13:35:58,666 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 
'c48e4b8eb196,44683,1733750968041' ***** 2024-12-09T13:35:58,666 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:35:58,666 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T13:35:58,666 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c48e4b8eb196,36493,1733750968163' ***** 2024-12-09T13:35:58,666 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:35:58,666 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T13:35:58,666 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c48e4b8eb196,39211,1733750968235' ***** 2024-12-09T13:35:58,666 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:35:58,666 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T13:35:58,666 INFO [RS:1;c48e4b8eb196:36493 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T13:35:58,666 INFO [RS:0;c48e4b8eb196:44683 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T13:35:58,667 INFO [RS:2;c48e4b8eb196:39211 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T13:35:58,667 INFO [RS:0;c48e4b8eb196:44683 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T13:35:58,667 INFO [RS:2;c48e4b8eb196:39211 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T13:35:58,667 INFO [RS:1;c48e4b8eb196:36493 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T13:35:58,667 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T13:35:58,667 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T13:35:58,667 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T13:35:58,667 INFO [RS:0;c48e4b8eb196:44683 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T13:35:58,667 INFO [RS:1;c48e4b8eb196:36493 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T13:35:58,667 INFO [RS:2;c48e4b8eb196:39211 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-09T13:35:58,667 INFO [RS:2;c48e4b8eb196:39211 {}] regionserver.HRegionServer(3091): Received CLOSE for f0043ab8119e670f2bd7cf64ad44aea8 2024-12-09T13:35:58,667 INFO [RS:1;c48e4b8eb196:36493 {}] regionserver.HRegionServer(3091): Received CLOSE for b965e8c57383b84f23fb09c5323ac145 2024-12-09T13:35:58,667 INFO [RS:0;c48e4b8eb196:44683 {}] regionserver.HRegionServer(3091): Received CLOSE for 96d9654456b57311f15f8834c0cf2325 2024-12-09T13:35:58,667 INFO [RS:1;c48e4b8eb196:36493 {}] regionserver.HRegionServer(959): stopping server c48e4b8eb196,36493,1733750968163 2024-12-09T13:35:58,667 INFO [RS:0;c48e4b8eb196:44683 {}] regionserver.HRegionServer(959): stopping server c48e4b8eb196,44683,1733750968041 2024-12-09T13:35:58,667 INFO [RS:2;c48e4b8eb196:39211 {}] regionserver.HRegionServer(959): stopping server c48e4b8eb196,39211,1733750968235 2024-12-09T13:35:58,667 INFO [RS:1;c48e4b8eb196:36493 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T13:35:58,667 INFO [RS:0;c48e4b8eb196:44683 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T13:35:58,667 INFO [RS:2;c48e4b8eb196:39211 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T13:35:58,667 INFO [RS:1;c48e4b8eb196:36493 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;c48e4b8eb196:36493. 2024-12-09T13:35:58,667 INFO [RS:2;c48e4b8eb196:39211 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;c48e4b8eb196:39211. 2024-12-09T13:35:58,667 INFO [RS:0;c48e4b8eb196:44683 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c48e4b8eb196:44683. 2024-12-09T13:35:58,667 DEBUG [RS:1;c48e4b8eb196:36493 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T13:35:58,667 DEBUG [RS:1;c48e4b8eb196:36493 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:35:58,668 DEBUG [RS:2;c48e4b8eb196:39211 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T13:35:58,668 DEBUG [RS:0;c48e4b8eb196:44683 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T13:35:58,668 DEBUG [RS:2;c48e4b8eb196:39211 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:35:58,668 DEBUG [RS:0;c48e4b8eb196:44683 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:35:58,668 INFO [RS:1;c48e4b8eb196:36493 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T13:35:58,668 INFO [RS:1;c48e4b8eb196:36493 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T13:35:58,668 INFO [RS:0;c48e4b8eb196:44683 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T13:35:58,668 INFO [RS:1;c48e4b8eb196:36493 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-09T13:35:58,668 INFO [RS:2;c48e4b8eb196:39211 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T13:35:58,668 INFO [RS:1;c48e4b8eb196:36493 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T13:35:58,668 DEBUG [RS:2;c48e4b8eb196:39211 {}] regionserver.HRegionServer(1325): Online Regions={f0043ab8119e670f2bd7cf64ad44aea8=testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8.} 2024-12-09T13:35:58,668 DEBUG [RS:0;c48e4b8eb196:44683 {}] regionserver.HRegionServer(1325): Online Regions={96d9654456b57311f15f8834c0cf2325=hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325.} 2024-12-09T13:35:58,668 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 96d9654456b57311f15f8834c0cf2325, disabling compactions & flushes 2024-12-09T13:35:58,668 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b965e8c57383b84f23fb09c5323ac145, disabling compactions & flushes 2024-12-09T13:35:58,668 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f0043ab8119e670f2bd7cf64ad44aea8, disabling compactions & flushes 2024-12-09T13:35:58,668 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. 2024-12-09T13:35:58,668 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145. 2024-12-09T13:35:58,668 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. 2024-12-09T13:35:58,668 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145. 2024-12-09T13:35:58,668 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. after waiting 0 ms 2024-12-09T13:35:58,668 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145. after waiting 0 ms 2024-12-09T13:35:58,668 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145. 2024-12-09T13:35:58,668 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. 2024-12-09T13:35:58,668 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8. 
2024-12-09T13:35:58,668 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 96d9654456b57311f15f8834c0cf2325 1/1 column families, dataSize=190 B heapSize=672 B 2024-12-09T13:35:58,668 DEBUG [RS:2;c48e4b8eb196:39211 {}] regionserver.HRegionServer(1351): Waiting on f0043ab8119e670f2bd7cf64ad44aea8 2024-12-09T13:35:58,668 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8. 2024-12-09T13:35:58,668 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8. after waiting 0 ms 2024-12-09T13:35:58,668 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8. 2024-12-09T13:35:58,668 DEBUG [RS:0;c48e4b8eb196:44683 {}] regionserver.HRegionServer(1351): Waiting on 96d9654456b57311f15f8834c0cf2325 2024-12-09T13:35:58,669 INFO [RS:1;c48e4b8eb196:36493 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-09T13:35:58,669 DEBUG [RS:1;c48e4b8eb196:36493 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, b965e8c57383b84f23fb09c5323ac145=testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145.} 2024-12-09T13:35:58,669 DEBUG [RS:1;c48e4b8eb196:36493 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, b965e8c57383b84f23fb09c5323ac145 2024-12-09T13:35:58,669 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T13:35:58,669 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T13:35:58,669 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T13:35:58,669 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T13:35:58,669 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T13:35:58,669 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=10.15 KB heapSize=17.11 KB 2024-12-09T13:35:58,672 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportExpiredSnapshot/f0043ab8119e670f2bd7cf64ad44aea8/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-09T13:35:58,672 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/default/testExportExpiredSnapshot/b965e8c57383b84f23fb09c5323ac145/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-09T13:35:58,673 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:35:58,673 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:35:58,673 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8. 2024-12-09T13:35:58,673 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145. 2024-12-09T13:35:58,673 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f0043ab8119e670f2bd7cf64ad44aea8: Waiting for close lock at 1733751358667Running coprocessor pre-close hooks at 1733751358668 (+1 ms)Disabling compacts and flushes for region at 1733751358668Disabling writes for close at 1733751358668Writing region close event to WAL at 1733751358669 (+1 ms)Running coprocessor post-close hooks at 1733751358673 (+4 ms)Closed at 1733751358673 2024-12-09T13:35:58,673 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b965e8c57383b84f23fb09c5323ac145: Waiting for close lock at 1733751358667Running coprocessor pre-close hooks at 1733751358668 (+1 ms)Disabling compacts and flushes for region at 1733751358668Disabling writes for close at 1733751358668Writing region close event to WAL at 1733751358670 (+2 ms)Running coprocessor post-close hooks at 1733751358673 (+3 ms)Closed at 1733751358673 2024-12-09T13:35:58,673 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1733751217384.f0043ab8119e670f2bd7cf64ad44aea8. 2024-12-09T13:35:58,673 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1733751217384.b965e8c57383b84f23fb09c5323ac145. 
2024-12-09T13:35:58,682 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/acl/96d9654456b57311f15f8834c0cf2325/.tmp/l/2a87a38ec6a64ae9a141d4b2d8e44b35 is 68, key is testtb-testExportFileSystemStateWithSkipTmp/l:/1733751324463/DeleteFamily/seqid=0 2024-12-09T13:35:58,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742418_1594 (size=5142) 2024-12-09T13:35:58,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742418_1594 (size=5142) 2024-12-09T13:35:58,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742418_1594 (size=5142) 2024-12-09T13:35:58,686 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=190 B at sequenceid=37 (bloomFilter=false), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/acl/96d9654456b57311f15f8834c0cf2325/.tmp/l/2a87a38ec6a64ae9a141d4b2d8e44b35 2024-12-09T13:35:58,689 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/.tmp/info/96e3ccfe18c34a0eabdfad42d881b5a5 is 131, key is hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325./info:regioninfo/1733751273283/Put/seqid=0 2024-12-09T13:35:58,691 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2a87a38ec6a64ae9a141d4b2d8e44b35 2024-12-09T13:35:58,691 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/acl/96d9654456b57311f15f8834c0cf2325/.tmp/l/2a87a38ec6a64ae9a141d4b2d8e44b35 as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/acl/96d9654456b57311f15f8834c0cf2325/l/2a87a38ec6a64ae9a141d4b2d8e44b35 2024-12-09T13:35:58,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742419_1595 (size=7555) 2024-12-09T13:35:58,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742419_1595 (size=7555) 2024-12-09T13:35:58,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742419_1595 (size=7555) 2024-12-09T13:35:58,694 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.30 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/.tmp/info/96e3ccfe18c34a0eabdfad42d881b5a5 2024-12-09T13:35:58,696 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] 
regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2a87a38ec6a64ae9a141d4b2d8e44b35 2024-12-09T13:35:58,696 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/acl/96d9654456b57311f15f8834c0cf2325/l/2a87a38ec6a64ae9a141d4b2d8e44b35, entries=2, sequenceid=37, filesize=5.0 K 2024-12-09T13:35:58,697 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~190 B/190, heapSize ~656 B/656, currentSize=0 B/0 for 96d9654456b57311f15f8834c0cf2325 in 29ms, sequenceid=37, compaction requested=false 2024-12-09T13:35:58,697 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-09T13:35:58,700 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/acl/96d9654456b57311f15f8834c0cf2325/recovered.edits/40.seqid, newMaxSeqId=40, maxSeqId=31 2024-12-09T13:35:58,701 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:35:58,701 INFO [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. 2024-12-09T13:35:58,701 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 96d9654456b57311f15f8834c0cf2325: Waiting for close lock at 1733751358667Running coprocessor pre-close hooks at 1733751358668 (+1 ms)Disabling compacts and flushes for region at 1733751358668Disabling writes for close at 1733751358668Obtaining lock to block concurrent updates at 1733751358668Preparing flush snapshotting stores in 96d9654456b57311f15f8834c0cf2325 at 1733751358668Finished memstore snapshotting hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325., syncing WAL and waiting on mvcc, flushsize=dataSize=190, getHeapSize=656, getOffHeapSize=0, getCellsCount=3 at 1733751358668Flushing stores of hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. at 1733751358669 (+1 ms)Flushing 96d9654456b57311f15f8834c0cf2325/l: creating writer at 1733751358669Flushing 96d9654456b57311f15f8834c0cf2325/l: appending metadata at 1733751358682 (+13 ms)Flushing 96d9654456b57311f15f8834c0cf2325/l: closing flushed file at 1733751358682Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@fcf872d: reopening flushed file at 1733751358691 (+9 ms)Finished flush of dataSize ~190 B/190, heapSize ~656 B/656, currentSize=0 B/0 for 96d9654456b57311f15f8834c0cf2325 in 29ms, sequenceid=37, compaction requested=false at 1733751358697 (+6 ms)Writing region close event to WAL at 1733751358698 (+1 ms)Running coprocessor post-close hooks at 1733751358701 (+3 ms)Closed at 1733751358701 2024-12-09T13:35:58,701 DEBUG [RS_CLOSE_REGION-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1733750971035.96d9654456b57311f15f8834c0cf2325. 
2024-12-09T13:35:58,704 INFO [regionserver/c48e4b8eb196:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T13:35:58,709 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/.tmp/ns/fb8bd57c94914a7d87a45ff56a1cc7ca is 119, key is testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd./ns:/1733751324476/DeleteFamily/seqid=0 2024-12-09T13:35:58,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742420_1596 (size=5927) 2024-12-09T13:35:58,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742420_1596 (size=5927) 2024-12-09T13:35:58,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742420_1596 (size=5927) 2024-12-09T13:35:58,714 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=430 B at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/.tmp/ns/fb8bd57c94914a7d87a45ff56a1cc7ca 2024-12-09T13:35:58,719 INFO [regionserver/c48e4b8eb196:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T13:35:58,719 INFO [regionserver/c48e4b8eb196:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T13:35:58,722 INFO [regionserver/c48e4b8eb196:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T13:35:58,726 INFO [regionserver/c48e4b8eb196:0.Chore.2 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T13:35:58,738 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/.tmp/rep_barrier/5838e4e755af4be0aa44b388162730cf is 128, key is testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd./rep_barrier:/1733751324476/DeleteFamily/seqid=0 2024-12-09T13:35:58,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742421_1597 (size=5990) 2024-12-09T13:35:58,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742421_1597 (size=5990) 2024-12-09T13:35:58,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742421_1597 (size=5990) 2024-12-09T13:35:58,743 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=466 B at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/.tmp/rep_barrier/5838e4e755af4be0aa44b388162730cf 2024-12-09T13:35:58,750 INFO [regionserver/c48e4b8eb196:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T13:35:58,757 INFO 
[regionserver/c48e4b8eb196:0.Chore.2 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T13:35:58,765 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/.tmp/table/0d077ef759074b248761ab571353d00c is 122, key is testtb-testExportFileSystemStateWithSkipTmp,1,1733751302276.3c8784dbaa49d85aceb6fab91f2628cd./table:/1733751324476/DeleteFamily/seqid=0 2024-12-09T13:35:58,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742422_1598 (size=6012) 2024-12-09T13:35:58,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742422_1598 (size=6012) 2024-12-09T13:35:58,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742422_1598 (size=6012) 2024-12-09T13:35:58,770 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=996 B at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/.tmp/table/0d077ef759074b248761ab571353d00c 2024-12-09T13:35:58,774 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/.tmp/info/96e3ccfe18c34a0eabdfad42d881b5a5 as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/info/96e3ccfe18c34a0eabdfad42d881b5a5 2024-12-09T13:35:58,777 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/info/96e3ccfe18c34a0eabdfad42d881b5a5, entries=19, sequenceid=246, filesize=7.4 K 2024-12-09T13:35:58,778 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/.tmp/ns/fb8bd57c94914a7d87a45ff56a1cc7ca as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/ns/fb8bd57c94914a7d87a45ff56a1cc7ca 2024-12-09T13:35:58,782 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/ns/fb8bd57c94914a7d87a45ff56a1cc7ca, entries=4, sequenceid=246, filesize=5.8 K 2024-12-09T13:35:58,782 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/.tmp/rep_barrier/5838e4e755af4be0aa44b388162730cf as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/rep_barrier/5838e4e755af4be0aa44b388162730cf 
2024-12-09T13:35:58,785 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/rep_barrier/5838e4e755af4be0aa44b388162730cf, entries=4, sequenceid=246, filesize=5.8 K 2024-12-09T13:35:58,786 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/.tmp/table/0d077ef759074b248761ab571353d00c as hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/table/0d077ef759074b248761ab571353d00c 2024-12-09T13:35:58,790 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/table/0d077ef759074b248761ab571353d00c, entries=6, sequenceid=246, filesize=5.9 K 2024-12-09T13:35:58,791 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~10.15 KB/10395, heapSize ~17.05 KB/17456, currentSize=0 B/0 for 1588230740 in 122ms, sequenceid=246, compaction requested=false 2024-12-09T13:35:58,791 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T13:35:58,795 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/data/hbase/meta/1588230740/recovered.edits/249.seqid, newMaxSeqId=249, maxSeqId=214 2024-12-09T13:35:58,795 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:35:58,795 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T13:35:58,796 INFO [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T13:35:58,796 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733751358669Running coprocessor pre-close hooks at 1733751358669Disabling compacts and flushes for region at 1733751358669Disabling writes for close at 1733751358669Obtaining lock to block concurrent updates at 1733751358669Preparing flush snapshotting stores in 1588230740 at 1733751358669Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=10395, getHeapSize=17456, getOffHeapSize=0, getCellsCount=81 at 1733751358670 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733751358670Flushing 1588230740/info: creating writer at 1733751358670Flushing 1588230740/info: appending metadata at 1733751358689 (+19 ms)Flushing 1588230740/info: closing flushed file at 1733751358689Flushing 1588230740/ns: creating writer at 1733751358697 (+8 
ms)Flushing 1588230740/ns: appending metadata at 1733751358709 (+12 ms)Flushing 1588230740/ns: closing flushed file at 1733751358709Flushing 1588230740/rep_barrier: creating writer at 1733751358718 (+9 ms)Flushing 1588230740/rep_barrier: appending metadata at 1733751358737 (+19 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1733751358737Flushing 1588230740/table: creating writer at 1733751358747 (+10 ms)Flushing 1588230740/table: appending metadata at 1733751358765 (+18 ms)Flushing 1588230740/table: closing flushed file at 1733751358765Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1eef8b65: reopening flushed file at 1733751358773 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@11a8d6ce: reopening flushed file at 1733751358778 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5af299ab: reopening flushed file at 1733751358782 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5980e2ff: reopening flushed file at 1733751358786 (+4 ms)Finished flush of dataSize ~10.15 KB/10395, heapSize ~17.05 KB/17456, currentSize=0 B/0 for 1588230740 in 122ms, sequenceid=246, compaction requested=false at 1733751358791 (+5 ms)Writing region close event to WAL at 1733751358792 (+1 ms)Running coprocessor post-close hooks at 1733751358795 (+3 ms)Closed at 1733751358796 (+1 ms) 2024-12-09T13:35:58,796 DEBUG [RS_CLOSE_META-regionserver/c48e4b8eb196:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T13:35:58,868 INFO [RS:0;c48e4b8eb196:44683 {}] regionserver.HRegionServer(976): stopping server c48e4b8eb196,44683,1733750968041; all regions closed. 2024-12-09T13:35:58,868 INFO [RS:2;c48e4b8eb196:39211 {}] regionserver.HRegionServer(976): stopping server c48e4b8eb196,39211,1733750968235; all regions closed. 2024-12-09T13:35:58,869 INFO [RS:1;c48e4b8eb196:36493 {}] regionserver.HRegionServer(976): stopping server c48e4b8eb196,36493,1733750968163; all regions closed. 
2024-12-09T13:35:58,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741835_1011 (size=15940) 2024-12-09T13:35:58,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742326_1502 (size=13036) 2024-12-09T13:35:58,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742326_1502 (size=13036) 2024-12-09T13:35:58,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741836_1012 (size=89810) 2024-12-09T13:35:58,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741835_1011 (size=15940) 2024-12-09T13:35:58,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741836_1012 (size=89810) 2024-12-09T13:35:58,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741836_1012 (size=89810) 2024-12-09T13:35:58,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741835_1011 (size=15940) 2024-12-09T13:35:58,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742326_1502 (size=13036) 2024-12-09T13:35:58,878 DEBUG [RS:0;c48e4b8eb196:44683 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/oldWALs 2024-12-09T13:35:58,878 DEBUG [RS:2;c48e4b8eb196:39211 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/oldWALs 2024-12-09T13:35:58,878 DEBUG [RS:1;c48e4b8eb196:36493 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/oldWALs 2024-12-09T13:35:58,878 INFO [RS:0;c48e4b8eb196:44683 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL c48e4b8eb196%2C44683%2C1733750968041:(num 1733750969889) 2024-12-09T13:35:58,878 INFO [RS:2;c48e4b8eb196:39211 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL c48e4b8eb196%2C39211%2C1733750968235.meta:.meta(num 1733750970412) 2024-12-09T13:35:58,878 INFO [RS:1;c48e4b8eb196:36493 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL c48e4b8eb196%2C36493%2C1733750968163.meta:.meta(num 1733751272326) 2024-12-09T13:35:58,878 DEBUG [RS:0;c48e4b8eb196:44683 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:35:58,878 INFO [RS:0;c48e4b8eb196:44683 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T13:35:58,878 INFO [RS:0;c48e4b8eb196:44683 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T13:35:58,879 INFO [RS:0;c48e4b8eb196:44683 {}] hbase.ChoreService(370): Chore service for: regionserver/c48e4b8eb196:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T13:35:58,879 INFO [RS:0;c48e4b8eb196:44683 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-09T13:35:58,879 INFO [RS:0;c48e4b8eb196:44683 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T13:35:58,879 INFO [RS:0;c48e4b8eb196:44683 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T13:35:58,879 INFO [RS:0;c48e4b8eb196:44683 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T13:35:58,879 INFO [regionserver/c48e4b8eb196:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T13:35:58,879 INFO [RS:0;c48e4b8eb196:44683 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44683 2024-12-09T13:35:58,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741833_1009 (size=18906) 2024-12-09T13:35:58,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073741834_1010 (size=9300) 2024-12-09T13:35:58,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741833_1009 (size=18906) 2024-12-09T13:35:58,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741833_1009 (size=18906) 2024-12-09T13:35:58,882 DEBUG [RS:2;c48e4b8eb196:39211 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/oldWALs 2024-12-09T13:35:58,882 INFO [RS:2;c48e4b8eb196:39211 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL c48e4b8eb196%2C39211%2C1733750968235:(num 1733750969886) 2024-12-09T13:35:58,882 DEBUG [RS:2;c48e4b8eb196:39211 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:35:58,882 INFO [RS:2;c48e4b8eb196:39211 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T13:35:58,882 INFO [RS:2;c48e4b8eb196:39211 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T13:35:58,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073741834_1010 (size=9300) 2024-12-09T13:35:58,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073741834_1010 (size=9300) 2024-12-09T13:35:58,882 INFO [RS:2;c48e4b8eb196:39211 {}] hbase.ChoreService(370): Chore service for: regionserver/c48e4b8eb196:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T13:35:58,883 INFO [RS:2;c48e4b8eb196:39211 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T13:35:58,883 INFO [RS:2;c48e4b8eb196:39211 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T13:35:58,883 INFO [regionserver/c48e4b8eb196:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T13:35:58,883 INFO [RS:2;c48e4b8eb196:39211 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-09T13:35:58,883 INFO [RS:2;c48e4b8eb196:39211 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T13:35:58,883 INFO [RS:2;c48e4b8eb196:39211 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39211 2024-12-09T13:35:58,884 DEBUG [RS:1;c48e4b8eb196:36493 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/oldWALs 2024-12-09T13:35:58,884 INFO [RS:1;c48e4b8eb196:36493 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL c48e4b8eb196%2C36493%2C1733750968163:(num 1733750969890) 2024-12-09T13:35:58,884 DEBUG [RS:1;c48e4b8eb196:36493 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T13:35:58,884 INFO [RS:1;c48e4b8eb196:36493 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T13:35:58,884 INFO [RS:1;c48e4b8eb196:36493 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T13:35:58,884 INFO [RS:1;c48e4b8eb196:36493 {}] hbase.ChoreService(370): Chore service for: regionserver/c48e4b8eb196:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T13:35:58,884 INFO [RS:1;c48e4b8eb196:36493 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T13:35:58,884 INFO [regionserver/c48e4b8eb196:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T13:35:58,885 INFO [RS:1;c48e4b8eb196:36493 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36493 2024-12-09T13:35:58,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T13:35:58,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c48e4b8eb196,44683,1733750968041 2024-12-09T13:35:58,906 INFO [RS:0;c48e4b8eb196:44683 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T13:35:58,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c48e4b8eb196,39211,1733750968235 2024-12-09T13:35:58,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c48e4b8eb196,36493,1733750968163 2024-12-09T13:35:58,915 INFO [RS:2;c48e4b8eb196:39211 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T13:35:58,915 INFO [RS:1;c48e4b8eb196:36493 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T13:35:58,923 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c48e4b8eb196,39211,1733750968235] 2024-12-09T13:35:58,939 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c48e4b8eb196,39211,1733750968235 already deleted, retry=false 2024-12-09T13:35:58,940 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c48e4b8eb196,39211,1733750968235 
expired; onlineServers=2 2024-12-09T13:35:58,940 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c48e4b8eb196,44683,1733750968041] 2024-12-09T13:35:58,975 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c48e4b8eb196,44683,1733750968041 already deleted, retry=false 2024-12-09T13:35:58,975 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c48e4b8eb196,44683,1733750968041 expired; onlineServers=1 2024-12-09T13:35:58,975 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c48e4b8eb196,36493,1733750968163] 2024-12-09T13:35:58,989 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c48e4b8eb196,36493,1733750968163 already deleted, retry=false 2024-12-09T13:35:58,989 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c48e4b8eb196,36493,1733750968163 expired; onlineServers=0 2024-12-09T13:35:58,989 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c48e4b8eb196,45049,1733750967073' ***** 2024-12-09T13:35:58,989 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T13:35:58,990 INFO [M:0;c48e4b8eb196:45049 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T13:35:58,990 INFO [M:0;c48e4b8eb196:45049 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T13:35:58,990 DEBUG [M:0;c48e4b8eb196:45049 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T13:35:58,990 DEBUG [M:0;c48e4b8eb196:45049 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T13:35:58,990 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-09T13:35:58,990 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster-HFileCleaner.large.0-1733750969499 {}] cleaner.HFileCleaner(306): Exit Thread[master/c48e4b8eb196:0:becomeActiveMaster-HFileCleaner.large.0-1733750969499,5,FailOnTimeoutGroup] 2024-12-09T13:35:58,990 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster-HFileCleaner.small.0-1733750969505 {}] cleaner.HFileCleaner(306): Exit Thread[master/c48e4b8eb196:0:becomeActiveMaster-HFileCleaner.small.0-1733750969505,5,FailOnTimeoutGroup] 2024-12-09T13:35:58,990 INFO [M:0;c48e4b8eb196:45049 {}] hbase.ChoreService(370): Chore service for: master/c48e4b8eb196:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T13:35:58,990 INFO [M:0;c48e4b8eb196:45049 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T13:35:58,990 DEBUG [M:0;c48e4b8eb196:45049 {}] master.HMaster(1795): Stopping service threads 2024-12-09T13:35:58,990 INFO [M:0;c48e4b8eb196:45049 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T13:35:58,990 INFO [M:0;c48e4b8eb196:45049 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T13:35:58,991 INFO [M:0;c48e4b8eb196:45049 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T13:35:58,991 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
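The sequence above shows the master learning about region server shutdown through ZooKeeper: each region server holds an ephemeral znode under /hbase/rs, the znode vanishes when the server's session closes, and the master's watcher processes the expiration and decrements onlineServers. Below is a minimal sketch of that ephemeral-node liveness pattern using the plain ZooKeeper client, assuming a local ensemble at 127.0.0.1:2181; the paths and names are illustrative and this is not HBase's RegionServerTracker code.

import java.util.List;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public final class EphemeralLivenessSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });

    // Create the persistent parent path once (illustrative path, not /hbase/rs).
    if (zk.exists("/demo", false) == null) {
      zk.create("/demo", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    }
    if (zk.exists("/demo/rs", false) == null) {
      zk.create("/demo/rs", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    }

    // A "region server" registers an ephemeral znode; it is deleted automatically
    // when the session closes, which is what the master reacts to in the log.
    zk.create("/demo/rs/server-1", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    // A "master" watches the children of the rs path, mirroring the
    // NodeChildrenChanged / NodeDeleted handling above.
    Watcher childWatcher = event -> {
      if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
        System.out.println("children changed under " + event.getPath());
      }
    };
    List<String> online = zk.getChildren("/demo/rs", childWatcher);
    System.out.println("onlineServers=" + online.size());

    zk.close(); // Ephemeral node is removed; on a live ensemble the watcher would fire.
  }
}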
2024-12-09T13:35:58,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T13:35:58,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T13:35:58,998 DEBUG [M:0;c48e4b8eb196:45049 {}] zookeeper.ZKUtil(347): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T13:35:58,998 WARN [M:0;c48e4b8eb196:45049 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T13:35:58,999 INFO [M:0;c48e4b8eb196:45049 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/.lastflushedseqids 2024-12-09T13:35:59,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33239 is added to blk_1073742423_1599 (size=333) 2024-12-09T13:35:59,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42277 is added to blk_1073742423_1599 (size=333) 2024-12-09T13:35:59,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39425 is added to blk_1073742423_1599 (size=333) 2024-12-09T13:35:59,009 INFO [M:0;c48e4b8eb196:45049 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T13:35:59,009 INFO [M:0;c48e4b8eb196:45049 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T13:35:59,009 DEBUG [M:0;c48e4b8eb196:45049 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T13:35:59,019 INFO [M:0;c48e4b8eb196:45049 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T13:35:59,019 DEBUG [M:0;c48e4b8eb196:45049 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T13:35:59,019 DEBUG [M:0;c48e4b8eb196:45049 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T13:35:59,019 DEBUG [M:0;c48e4b8eb196:45049 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T13:35:59,020 INFO [M:0;c48e4b8eb196:45049 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=1001.24 KB heapSize=1.17 MB
2024-12-09T13:35:59,020 ERROR [AsyncFSWAL-0-hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/MasterData-prefix:c48e4b8eb196,45049,1733750967073 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/MasterData-prefix:c48e4b8eb196,45049,1733750967073,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T13:35:59,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T13:35:59,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44683-0x1000ad205eb0001, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T13:35:59,024 INFO [RS:0;c48e4b8eb196:44683 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T13:35:59,024 INFO [RS:0;c48e4b8eb196:44683 {}] regionserver.HRegionServer(1031): Exiting; stopping=c48e4b8eb196,44683,1733750968041; zookeeper connection closed.
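The ERROR above shows the AsyncFSWAL consumer thread dying because the output's internal buffer ("this.buf") was already null when buffered() dereferenced it, i.e. an append raced with the writer being released during shutdown. Below is a minimal sketch of that hazard and one defensive variant, assuming a volatile buffer reference; the class is illustrative only and is neither the HBase FanOutOneBlockAsyncDFSOutput code nor its fix.

import java.nio.ByteBuffer;

// Hypothetical sketch of the shutdown race behind the NullPointerException above:
// one thread nulls out a buffer on close while another thread still reads it.
public final class BufferedOutputSketch {
  private volatile ByteBuffer buf = ByteBuffer.allocate(4096);

  // Racy version: if close() nulls buf before this dereference,
  // remaining() throws NullPointerException, as in the log.
  public int bufferedRacy() {
    return buf.remaining();
  }

  // Defensive version: snapshot the reference once, then decide.
  public int bufferedSafe() {
    ByteBuffer snapshot = buf;
    return snapshot == null ? 0 : snapshot.remaining();
  }

  public void close() {
    buf = null; // release the buffer; concurrent readers must tolerate this
  }

  public static void main(String[] args) {
    BufferedOutputSketch out = new BufferedOutputSketch();
    System.out.println("buffered before close: " + out.bufferedSafe());
    out.close();
    System.out.println("buffered after close:  " + out.bufferedSafe());
  }
}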
2024-12-09T13:35:59,024 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@11189a85 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@11189a85 2024-12-09T13:35:59,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T13:35:59,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T13:35:59,031 INFO [RS:1;c48e4b8eb196:36493 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T13:35:59,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39211-0x1000ad205eb0003, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T13:35:59,031 INFO [RS:2;c48e4b8eb196:39211 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T13:35:59,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36493-0x1000ad205eb0002, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T13:35:59,031 INFO [RS:1;c48e4b8eb196:36493 {}] regionserver.HRegionServer(1031): Exiting; stopping=c48e4b8eb196,36493,1733750968163; zookeeper connection closed. 2024-12-09T13:35:59,031 INFO [RS:2;c48e4b8eb196:39211 {}] regionserver.HRegionServer(1031): Exiting; stopping=c48e4b8eb196,39211,1733750968235; zookeeper connection closed. 2024-12-09T13:35:59,031 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1edfaf1c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1edfaf1c 2024-12-09T13:35:59,032 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1bf3f6c6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1bf3f6c6 2024-12-09T13:35:59,032 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-09T13:36:04,185 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T13:36:07,484 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:36:07,484 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T13:36:07,485 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T13:36:07,485 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-09T13:36:07,485 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 
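The "Hook closing fs=..." lines above come from JVM shutdown hooks that close each test filesystem as the mini cluster exits. Below is a minimal sketch of registering such a hook with Runtime.addShutdownHook, using a plain Closeable as a stand-in for the real HFileSystem; names are illustrative only.

import java.io.Closeable;
import java.io.IOException;

public final class ShutdownHookSketch {
  public static void main(String[] args) {
    // Stand-in resource; in the log this role is played by HFileSystem instances.
    Closeable fs = () -> System.out.println("Hook closing fs=example");

    // Register a named hook thread that closes the resource during JVM shutdown.
    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
      try {
        fs.close();
      } catch (IOException e) {
        e.printStackTrace();
      }
    }, "Shutdown of example-fs"));

    System.out.println("exiting main; hook runs during JVM shutdown");
  }
}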
2024-12-09T13:36:07,485 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:36:07,485 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-09T13:36:07,485 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-09T13:36:12,988 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T13:36:26,023 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T13:36:56,024 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;c48e4b8eb196:45049 233 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 3 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 42 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@7a370cf7 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 19 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 22 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@65667fc7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4591 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 47 Waiting on java.util.concurrent.CountDownLatch$Sync@1719f574 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13033 Waited count: 13682 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) 
java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 17 Waited count: 17 Waiting on java.lang.ref.ReferenceQueue$Lock@66f7b3ff Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@30b960a1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@70589220): State: TIMED_WAITING Blocked count: 0 Waited count: 913 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2080160363-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2080160363-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2080160363-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2080160363-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2080160363-41-acceptor-0@64a549f8-ServerConnector@188d52a2{HTTP/1.1, (http/1.1)}{localhost:34039}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2080160363-42): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2080160363-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2080160363-44): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-543f2336-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 29 Waited count: 3252 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60ef6b7b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 45819): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3019d844): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 152 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@36b49887): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 153 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 44875 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1347 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@142993b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 45819): State: TIMED_WAITING Blocked count: 117 Waited count: 2500 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 45819): State: TIMED_WAITING Blocked count: 130 Waited count: 2492 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 45819): State: TIMED_WAITING Blocked count: 123 Waited count: 2483 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 45819): State: TIMED_WAITING Blocked count: 119 Waited count: 2479 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 45819): State: TIMED_WAITING Blocked count: 132 Waited count: 2489 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 
(pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@e3b949c): State: TIMED_WAITING Blocked count: 0 Waited count: 228 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2661f6c3): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@43480010): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@dd757eb): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(826874655)): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1684352221-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1684352221-88-acceptor-0@348693b2-ServerConnector@7ea2aa50{HTTP/1.1, (http/1.1)}{localhost:43791}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1684352221-89): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1684352221-90): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-2e157a8e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@456f4ad1): State: TIMED_WAITING Blocked count: 0 Waited count: 910 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 37147): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 326 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35e2dc19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-67561561-172.17.0.2-1733750962175 heartbeating to localhost/127.0.0.1:45819): State: TIMED_WAITING Blocked count: 1430 Waited count: 1532 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@c25da2f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 37147): State: TIMED_WAITING Blocked count: 0 Waited count: 466 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 37147): State: TIMED_WAITING Blocked count: 0 Waited count: 474 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 37147): State: TIMED_WAITING Blocked count: 0 Waited count: 475 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 37147): State: TIMED_WAITING Blocked count: 0 Waited count: 479 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 37147): State: TIMED_WAITING Blocked count: 0 Waited count: 486 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Client (294847031) connection to localhost/127.0.0.1:45819 from jenkins): State: TIMED_WAITING Blocked count: 1322 Waited count: 1323 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 121 (qtp1224444869-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) 
app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1224444869-122-acceptor-0@41a524a5-ServerConnector@600292e3{HTTP/1.1, (http/1.1)}{localhost:40739}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Parameter Sending Thread for localhost/127.0.0.1:45819): State: TIMED_WAITING Blocked count: 0 Waited count: 2115 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1224444869-123): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1224444869-124): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-42ed3c18-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3a4628a4): State: TIMED_WAITING Blocked count: 8 Waited count: 907 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 35267): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 325 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@67f1acae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-67561561-172.17.0.2-1733750962175 heartbeating to localhost/127.0.0.1:45819): State: TIMED_WAITING Blocked count: 1383 Waited count: 1525 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7d8e926b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 465 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 457 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 462 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 465 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 468 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1562542143-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1562542143-154-acceptor-0@4b562903-ServerConnector@5efaea97{HTTP/1.1, (http/1.1)}{localhost:40615}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1562542143-155): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1562542143-156): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-5d4c4796-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7b2cefe2): State: TIMED_WAITING Blocked count: 0 Waited count: 909 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 42289): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 280 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4153b16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-67561561-172.17.0.2-1733750962175 heartbeating to localhost/127.0.0.1:45819): State: TIMED_WAITING Blocked count: 1360 Waited count: 1518 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@66649d3f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 42289): State: TIMED_WAITING Blocked count: 0 Waited count: 457 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 42289): State: TIMED_WAITING Blocked count: 0 Waited count: 455 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 42289): State: TIMED_WAITING Blocked count: 0 Waited count: 462 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 42289): State: TIMED_WAITING Blocked count: 0 Waited count: 456 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 42289): State: TIMED_WAITING Blocked count: 0 Waited count: 462 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data3)): State: TIMED_WAITING Blocked count: 18 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data1)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data4)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data2)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data4/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data3/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data2/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data1/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (java.util.concurrent.ThreadPoolExecutor$Worker@42cfa18d[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 
(java.util.concurrent.ThreadPoolExecutor$Worker@171fd18f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data6/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data5/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (java.util.concurrent.ThreadPoolExecutor$Worker@10ca87e5[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57683): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 237 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 241 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 227 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 242 (SyncThread:0): State: WAITING Blocked count: 18 Waited count: 363 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15bd722e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 243 (ProcessThread(sid:0 cport:57683):): State: WAITING Blocked count: 4 Waited count: 489 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2ad84141 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 244 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 515 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7243b010 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 245 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 100 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 247 (LeaseRenewer:jenkins@localhost:45819): State: TIMED_WAITING Blocked count: 13 Waited count: 472 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7a5b3498 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 497 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 47 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (Time-limited test-SendThread(127.0.0.1:57683)): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 260 (Time-limited test-EventThread): State: WAITING Blocked count: 16 Waited count: 63 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d4d51a7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 261 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-3): State: WAITING Blocked count: 7 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (zk-event-processor-pool-0): State: WAITING Blocked count: 26 Waited count: 81 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31d9b499 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 99 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 
(NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-9): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-10): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1205b9ce Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049): State: WAITING Blocked count: 98 Waited count: 457 Waiting on java.util.concurrent.Semaphore$NonfairSync@dfdb43d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049): State: WAITING Blocked count: 146 Waited count: 546 Waiting on java.util.concurrent.Semaphore$NonfairSync@1aaa0c4c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45049): State: WAITING Blocked count: 42 Waited count: 9206 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@662b0efc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45049): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@59df103e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@59df103e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@39e30b64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7361b878 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4d831406 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.Semaphore$NonfairSync@6cfca368 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 290 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4de53db8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 291 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 110 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 288 (M:0;c48e4b8eb196:45049): State: TIMED_WAITING Blocked count: 12 Waited count: 3914 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1112/0x00007facf8f966b8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 358 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/c48e4b8eb196:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/c48e4b8eb196:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (org.apache.hadoop.hdfs.PeerCache@30fec42e): State: TIMED_WAITING Blocked count: 0 Waited count: 151 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4483 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 399 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 104 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 102 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 149 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 45 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 44902 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 442 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 27 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 444 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 38 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 457 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ed4d5d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 482 (regionserver/c48e4b8eb196:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@e2d3eee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/c48e4b8eb196:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6dfe0016 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 484 
(regionserver/c48e4b8eb196:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e7d3c29 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 504 (LeaseRenewer:jenkins.hfs.2@localhost:45819): State: TIMED_WAITING Blocked count: 12 Waited count: 470 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 510 (LeaseRenewer:jenkins.hfs.1@localhost:45819): State: TIMED_WAITING Blocked count: 12 Waited count: 469 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 515 (LeaseRenewer:jenkins.hfs.0@localhost:45819): State: TIMED_WAITING Blocked count: 13 Waited count: 471 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (region-location-0): State: WAITING Blocked count: 15 Waited count: 22 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 44741 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 560 (ForkJoinPool.commonPool-worker-1): State: WAITING Blocked count: 0 Waited count: 760 Waiting on java.util.concurrent.ForkJoinPool@71f2ce42 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 585 (region-location-1): State: WAITING Blocked count: 4 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 586 (region-location-2): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 989 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 742 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1051 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1091 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 71 Waited count: 120 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5bba7b15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1139 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1140 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1201 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1202 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1203 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1204 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1223 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1256 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1257 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1258 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1260 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1261 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1617 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 48 Waiting on java.util.TaskQueue@3758fe49 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1976 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1977 (region-location-4): State: WAITING Blocked count: 4 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2254 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 683 Waiting on java.util.concurrent.ForkJoinPool@71f2ce42 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 2627 (ForkJoinPool.commonPool-worker-5): State: TIMED_WAITING Blocked count: 0 Waited count: 558 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 2966 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10294 (AsyncFSWAL-1-hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/MasterData-prefix:c48e4b8eb196,45049,1733750967073): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@503fa63e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10297 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-09T13:37:26,024 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T13:37:56,024 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;c48e4b8eb196:45049 228 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 3 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 42 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@7a370cf7 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 19 Waited count: 22 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 30 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@65667fc7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5190 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 53 Waiting on java.util.concurrent.CountDownLatch$Sync@3852a26c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13033 Waited count: 13683 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) 
java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 17 Waited count: 17 Waiting on java.lang.ref.ReferenceQueue$Lock@66f7b3ff Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@30b960a1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@70589220): State: TIMED_WAITING Blocked count: 0 Waited count: 1033 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2080160363-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2080160363-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2080160363-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2080160363-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2080160363-41-acceptor-0@64a549f8-ServerConnector@188d52a2{HTTP/1.1, (http/1.1)}{localhost:34039}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2080160363-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2080160363-43): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2080160363-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-543f2336-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 29 Waited count: 3252 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60ef6b7b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 45819): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3019d844): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 172 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@36b49887): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 173 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 50821 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1347 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@142993b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 45819): State: TIMED_WAITING Blocked count: 117 Waited count: 2561 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 45819): State: TIMED_WAITING Blocked count: 130 Waited count: 2553 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 45819): State: TIMED_WAITING Blocked count: 123 Waited count: 2543 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 45819): State: TIMED_WAITING Blocked count: 119 Waited count: 2540 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 45819): State: TIMED_WAITING Blocked count: 132 Waited count: 2550 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 
(pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@e3b949c): State: TIMED_WAITING Blocked count: 0 Waited count: 258 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2661f6c3): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@43480010): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@dd757eb): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(826874655)): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1684352221-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1684352221-88-acceptor-0@348693b2-ServerConnector@7ea2aa50{HTTP/1.1, (http/1.1)}{localhost:43791}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1684352221-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1684352221-90): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-2e157a8e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@456f4ad1): State: TIMED_WAITING Blocked count: 0 Waited count: 1030 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 37147): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 346 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35e2dc19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-67561561-172.17.0.2-1733750962175 heartbeating to localhost/127.0.0.1:45819): State: TIMED_WAITING Blocked count: 1450 Waited count: 1572 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@c25da2f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 37147): State: TIMED_WAITING Blocked count: 0 Waited count: 526 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 37147): State: TIMED_WAITING Blocked count: 0 Waited count: 534 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 37147): State: TIMED_WAITING Blocked count: 0 Waited count: 535 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 37147): State: TIMED_WAITING Blocked count: 0 Waited count: 539 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 37147): State: TIMED_WAITING Blocked count: 0 Waited count: 546 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Client (294847031) connection to localhost/127.0.0.1:45819 from jenkins): State: TIMED_WAITING Blocked count: 1382 Waited count: 1383 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 121 (qtp1224444869-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) 
app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1224444869-122-acceptor-0@41a524a5-ServerConnector@600292e3{HTTP/1.1, (http/1.1)}{localhost:40739}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Parameter Sending Thread for localhost/127.0.0.1:45819): State: TIMED_WAITING Blocked count: 0 Waited count: 2175 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1224444869-123): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1224444869-124): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-42ed3c18-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3a4628a4): State: TIMED_WAITING Blocked count: 8 Waited count: 1027 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 35267): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 345 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@67f1acae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-67561561-172.17.0.2-1733750962175 heartbeating to localhost/127.0.0.1:45819): State: TIMED_WAITING Blocked count: 1403 Waited count: 1565 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7d8e926b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 525 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 517 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 522 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 525 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 528 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1562542143-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1562542143-154-acceptor-0@4b562903-ServerConnector@5efaea97{HTTP/1.1, (http/1.1)}{localhost:40615}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1562542143-155): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1562542143-156): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-5d4c4796-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7b2cefe2): State: TIMED_WAITING Blocked count: 0 Waited count: 1029 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 42289): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 300 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4153b16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-67561561-172.17.0.2-1733750962175 heartbeating to localhost/127.0.0.1:45819): State: TIMED_WAITING Blocked count: 1380 Waited count: 1558 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@66649d3f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 42289): State: TIMED_WAITING Blocked count: 0 Waited count: 517 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 42289): State: TIMED_WAITING Blocked count: 0 Waited count: 515 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 42289): State: TIMED_WAITING Blocked count: 0 Waited count: 522 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 42289): State: TIMED_WAITING Blocked count: 0 Waited count: 516 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 42289): State: TIMED_WAITING Blocked count: 0 Waited count: 522 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data3)): State: TIMED_WAITING Blocked count: 18 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data1)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data4)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data2)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data4/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data3/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data2/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data1/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (java.util.concurrent.ThreadPoolExecutor$Worker@42cfa18d[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 
(java.util.concurrent.ThreadPoolExecutor$Worker@171fd18f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data6/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data5/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (java.util.concurrent.ThreadPoolExecutor$Worker@10ca87e5[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57683): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 237 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 241 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 257 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 242 (SyncThread:0): State: WAITING Blocked count: 18 Waited count: 367 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15bd722e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 243 (ProcessThread(sid:0 cport:57683):): State: WAITING Blocked count: 4 Waited count: 493 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2ad84141 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 244 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 519 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7243b010 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 245 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 101 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7a5b3498 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 545 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 47 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (Time-limited test-SendThread(127.0.0.1:57683)): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 260 (Time-limited test-EventThread): State: WAITING Blocked count: 16 Waited count: 63 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d4d51a7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 261 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-3): State: WAITING Blocked count: 7 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (zk-event-processor-pool-0): State: WAITING Blocked count: 26 Waited count: 81 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31d9b499 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-9): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-10): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.Semaphore$NonfairSync@1205b9ce Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049): State: WAITING Blocked count: 98 Waited count: 457 Waiting on java.util.concurrent.Semaphore$NonfairSync@dfdb43d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049): State: WAITING Blocked count: 146 Waited count: 546 Waiting on java.util.concurrent.Semaphore$NonfairSync@1aaa0c4c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45049): State: WAITING Blocked count: 42 Waited count: 9206 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@662b0efc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45049): State: WAITING Blocked count: 0 Waited count: 4 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@59df103e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@59df103e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@39e30b64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7361b878 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4d831406 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.Semaphore$NonfairSync@6cfca368 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 290 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4de53db8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 291 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 110 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 288 (M:0;c48e4b8eb196:45049): State: TIMED_WAITING Blocked count: 12 Waited count: 3914 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1112/0x00007facf8f966b8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 358 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/c48e4b8eb196:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/c48e4b8eb196:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (org.apache.hadoop.hdfs.PeerCache@30fec42e): State: TIMED_WAITING Blocked count: 0 Waited count: 171 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5083 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 399 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 104 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 102 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 165 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b4d1fb2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 50907 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 442 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 27 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 444 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 38 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 457 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ed4d5d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 482 (regionserver/c48e4b8eb196:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@e2d3eee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/c48e4b8eb196:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6dfe0016 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 484 
(regionserver/c48e4b8eb196:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e7d3c29 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 528 (region-location-0): State: WAITING Blocked count: 15 Waited count: 22 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (RPCClient-NioEventLoopGroup-6-2): State: 
RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 50746 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 560 (ForkJoinPool.commonPool-worker-1): State: WAITING Blocked count: 0 Waited count: 760 Waiting on java.util.concurrent.ForkJoinPool@71f2ce42 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 585 (region-location-1): State: WAITING Blocked count: 4 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 586 (region-location-2): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 989 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 748 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1051 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1091 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 71 Waited count: 120 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5bba7b15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1139 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1140 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1201 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1202 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1203 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1204 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1223 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1256 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1257 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1258 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1260 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1261 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1617 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 48 Waiting on java.util.TaskQueue@3758fe49 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1976 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1977 (region-location-4): State: WAITING Blocked count: 4 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2254 (ForkJoinPool.commonPool-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 684 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 2966 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10294 (AsyncFSWAL-1-hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/MasterData-prefix:c48e4b8eb196,45049,1733750967073): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@503fa63e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10297 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-09T13:38:26,025 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T13:38:56,025 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;c48e4b8eb196:45049 227 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 3 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 42 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@7a370cf7 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 19 Waited count: 23 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 34 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@65667fc7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5790 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 59 Waiting on java.util.concurrent.CountDownLatch$Sync@4808b999 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13033 Waited count: 13684 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 17 Waited count: 17 Waiting on java.lang.ref.ReferenceQueue$Lock@66f7b3ff Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@30b960a1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@70589220): State: TIMED_WAITING Blocked count: 0 Waited count: 1153 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2080160363-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2080160363-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2080160363-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2080160363-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2080160363-41-acceptor-0@64a549f8-ServerConnector@188d52a2{HTTP/1.1, (http/1.1)}{localhost:34039}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2080160363-42): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2080160363-43): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2080160363-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-543f2336-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 29 Waited count: 3252 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60ef6b7b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 45819): State: TIMED_WAITING Blocked count: 1 Waited 
count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3019d844): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 192 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@36b49887): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 193 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 56775 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1347 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@142993b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 45819): State: TIMED_WAITING Blocked count: 117 Waited count: 2622 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 45819): State: TIMED_WAITING Blocked count: 130 Waited count: 2614 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 45819): State: TIMED_WAITING Blocked count: 123 Waited count: 2604 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 45819): State: TIMED_WAITING Blocked count: 119 Waited count: 2601 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 45819): State: TIMED_WAITING Blocked count: 132 Waited count: 2611 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@e3b949c): State: TIMED_WAITING Blocked count: 0 Waited count: 288 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2661f6c3): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@43480010): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@dd757eb): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(826874655)): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1684352221-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1684352221-88-acceptor-0@348693b2-ServerConnector@7ea2aa50{HTTP/1.1, (http/1.1)}{localhost:43791}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1684352221-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1684352221-90): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-2e157a8e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@456f4ad1): State: TIMED_WAITING Blocked count: 0 Waited count: 1150 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 37147): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 366 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35e2dc19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-67561561-172.17.0.2-1733750962175 heartbeating to localhost/127.0.0.1:45819): State: TIMED_WAITING Blocked count: 1470 Waited count: 1612 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@c25da2f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 37147): State: TIMED_WAITING Blocked count: 0 Waited count: 586 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 37147): State: TIMED_WAITING Blocked count: 0 Waited count: 594 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 37147): State: TIMED_WAITING Blocked count: 0 Waited count: 595 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 37147): State: TIMED_WAITING Blocked count: 0 Waited count: 599 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 37147): State: TIMED_WAITING Blocked count: 0 Waited count: 606 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Client (294847031) connection to localhost/127.0.0.1:45819 from jenkins): State: TIMED_WAITING Blocked count: 1442 Waited count: 1443 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 121 (qtp1224444869-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1224444869-122-acceptor-0@41a524a5-ServerConnector@600292e3{HTTP/1.1, (http/1.1)}{localhost:40739}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Parameter Sending Thread for localhost/127.0.0.1:45819): State: TIMED_WAITING Blocked count: 0 Waited count: 2235 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1224444869-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1224444869-124): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-42ed3c18-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3a4628a4): State: TIMED_WAITING Blocked count: 8 Waited count: 1147 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 35267): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 365 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@67f1acae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-67561561-172.17.0.2-1733750962175 heartbeating to localhost/127.0.0.1:45819): State: TIMED_WAITING Blocked count: 1423 Waited count: 1605 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7d8e926b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 585 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 577 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 582 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 585 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 588 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1562542143-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1562542143-154-acceptor-0@4b562903-ServerConnector@5efaea97{HTTP/1.1, (http/1.1)}{localhost:40615}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1562542143-155): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1562542143-156): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-5d4c4796-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7b2cefe2): State: TIMED_WAITING Blocked count: 0 Waited count: 1149 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 42289): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 320 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4153b16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-67561561-172.17.0.2-1733750962175 heartbeating to localhost/127.0.0.1:45819): State: TIMED_WAITING Blocked count: 1400 Waited count: 1598 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@66649d3f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 42289): State: TIMED_WAITING Blocked count: 0 Waited count: 577 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 42289): State: TIMED_WAITING Blocked count: 0 Waited count: 575 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 42289): State: TIMED_WAITING Blocked count: 0 Waited count: 582 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 42289): State: TIMED_WAITING Blocked count: 0 Waited count: 576 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 42289): State: TIMED_WAITING Blocked count: 0 Waited count: 582 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data3)): State: TIMED_WAITING Blocked count: 18 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data1)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data4)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data2)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data4/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data3/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data2/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data1/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-23-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (java.util.concurrent.ThreadPoolExecutor$Worker@42cfa18d[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (java.util.concurrent.ThreadPoolExecutor$Worker@171fd18f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data6/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data5/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (java.util.concurrent.ThreadPoolExecutor$Worker@10ca87e5[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (FsDatasetAsyncDiskServiceFixer): 
State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57683): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 237 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 241 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 287 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 242 (SyncThread:0): State: WAITING Blocked count: 18 Waited count: 372 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15bd722e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 243 (ProcessThread(sid:0 
cport:57683):): State: WAITING Blocked count: 4 Waited count: 498 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2ad84141 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 244 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 524 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7243b010 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 245 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7a5b3498 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 593 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 47 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (Time-limited test-SendThread(127.0.0.1:57683)): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 260 (Time-limited test-EventThread): State: WAITING Blocked count: 16 Waited count: 63 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d4d51a7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 261 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-3): State: WAITING Blocked count: 7 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (zk-event-processor-pool-0): State: WAITING Blocked count: 26 Waited count: 81 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31d9b499 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-9): State: WAITING Blocked count: 5 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-10): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1205b9ce Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049): State: WAITING Blocked count: 98 Waited count: 457 Waiting on java.util.concurrent.Semaphore$NonfairSync@dfdb43d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 
(RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049): State: WAITING Blocked count: 146 Waited count: 546 Waiting on java.util.concurrent.Semaphore$NonfairSync@1aaa0c4c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45049): State: WAITING Blocked count: 42 Waited count: 9206 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@662b0efc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45049): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@59df103e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@59df103e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@39e30b64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7361b878 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4d831406 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.Semaphore$NonfairSync@6cfca368 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 290 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4de53db8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 291 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 110 
Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 288 (M:0;c48e4b8eb196:45049): State: TIMED_WAITING Blocked count: 12 Waited count: 3914 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1112/0x00007facf8f966b8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 358 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/c48e4b8eb196:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/c48e4b8eb196:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (org.apache.hadoop.hdfs.PeerCache@30fec42e): State: TIMED_WAITING Blocked count: 0 Waited count: 191 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5683 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 399 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 104 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 102 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 165 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b4d1fb2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 57 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 56909 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 442 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 27 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 444 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 38 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 457 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ed4d5d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 482 (regionserver/c48e4b8eb196:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@e2d3eee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/c48e4b8eb196:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6dfe0016 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 484 (regionserver/c48e4b8eb196:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e7d3c29 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 528 (region-location-0): State: WAITING Blocked count: 15 Waited count: 22 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 56747 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 560 (ForkJoinPool.commonPool-worker-1): State: TIMED_WAITING Blocked count: 0 Waited count: 761 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 585 (region-location-1): State: WAITING Blocked count: 4 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 586 (region-location-2): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 989 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 754 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1051 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1091 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 71 Waited count: 120 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5bba7b15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1139 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1140 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1201 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1202 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1203 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1204 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1223 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1256 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1257 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1258 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1260 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1261 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1617 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 48 Waiting on java.util.TaskQueue@3758fe49 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1976 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1977 (region-location-4): State: WAITING Blocked count: 4 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2966 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10294 (AsyncFSWAL-1-hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/MasterData-prefix:c48e4b8eb196,45049,1733750967073): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@503fa63e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10297 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-09T13:39:26,025 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
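[Editor's note] The per-thread records in the dump above (thread id and name, state, blocked count, waited count, the lock object being waited on, then the stack frames) correspond to what the JDK exposes through java.lang.management. As a rough, stand-alone illustration only, and assuming nothing about the HBase test harness's actual dumper (the class name SimpleThreadDumper below is made up for this sketch), the following prints the current JVM's threads in the same shape:

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

// Minimal sketch (not the HBase test utility's code): emit
// "Thread N (name): State: ... Blocked count: ... Waited count: ... Waiting on ... Stack:"
// records like the ones in the dump above, using only the standard platform MXBean.
public class SimpleThreadDumper {
    public static void main(String[] args) {
        ThreadMXBean mx = ManagementFactory.getThreadMXBean();
        // dumpAllThreads(false, false): skip locked-monitor/synchronizer details, keep full stacks.
        for (ThreadInfo ti : mx.dumpAllThreads(false, false)) {
            StringBuilder sb = new StringBuilder();
            sb.append("Thread ").append(ti.getThreadId())
              .append(" (").append(ti.getThreadName())
              .append("): State: ").append(ti.getThreadState())
              .append(" Blocked count: ").append(ti.getBlockedCount())   // times blocked entering a monitor
              .append(" Waited count: ").append(ti.getWaitedCount());    // times in WAITING/TIMED_WAITING
            if (ti.getLockName() != null) {
                sb.append(" Waiting on ").append(ti.getLockName());      // e.g. ...ConditionObject@4090a35
            }
            sb.append(" Stack:");
            System.out.println(sb);
            for (StackTraceElement frame : ti.getStackTrace()) {
                System.out.println("    " + frame);
            }
        }
    }
}

Run as a plain main class to see the current JVM's threads in this layout. In the dump above, the thread the harness says it is waiting on, Thread 288 (M:0;c48e4b8eb196:45049), is the notable entry: it is parked in SyncFuture.get via AbstractFSWAL.sync while flushing during HRegion.close, while the idle RPC handler and Netty event-loop threads account for most of the remaining records.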
2024-12-09T13:39:28,393 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=29, reuseRatio=74.36% 2024-12-09T13:39:28,393 DEBUG [master/c48e4b8eb196:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-09T13:39:36,088 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-09T13:39:56,025 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;c48e4b8eb196:45049 227 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 3 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 42 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@7a370cf7 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked 
count: 19 Waited count: 24 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@65667fc7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 30 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6390 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 65 Waiting on 
java.util.concurrent.CountDownLatch$Sync@652bf8f0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13033 Waited count: 13685 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) 
app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 17 Waited count: 17 Waiting on java.lang.ref.ReferenceQueue$Lock@66f7b3ff Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@30b960a1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@70589220): State: TIMED_WAITING Blocked count: 0 Waited count: 1273 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2080160363-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2080160363-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2080160363-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2080160363-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2080160363-41-acceptor-0@64a549f8-ServerConnector@188d52a2{HTTP/1.1, (http/1.1)}{localhost:34039}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2080160363-42): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2080160363-43): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2080160363-44): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-543f2336-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 29 Waited count: 3252 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60ef6b7b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 45819): State: TIMED_WAITING Blocked count: 1 Waited count: 65 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3019d844): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 212 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@36b49887): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 213 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 62727 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1347 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@142993b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 45819): State: TIMED_WAITING Blocked count: 117 Waited count: 2682 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 45819): State: TIMED_WAITING Blocked count: 130 Waited count: 2675 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 45819): State: TIMED_WAITING Blocked count: 123 Waited count: 2665 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 45819): State: TIMED_WAITING Blocked count: 119 Waited count: 2662 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 45819): State: TIMED_WAITING Blocked count: 132 Waited count: 2672 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@e3b949c): State: TIMED_WAITING Blocked count: 0 Waited count: 318 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2661f6c3): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@43480010): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@dd757eb): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(826874655)): State: TIMED_WAITING Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1684352221-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1684352221-88-acceptor-0@348693b2-ServerConnector@7ea2aa50{HTTP/1.1, (http/1.1)}{localhost:43791}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1684352221-89): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1684352221-90): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-2e157a8e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@456f4ad1): State: TIMED_WAITING Blocked count: 0 Waited count: 1270 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 37147): State: TIMED_WAITING Blocked count: 1 Waited count: 65 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 127 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 386 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35e2dc19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-67561561-172.17.0.2-1733750962175 heartbeating to localhost/127.0.0.1:45819): State: TIMED_WAITING Blocked count: 1490 Waited count: 1652 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@c25da2f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 37147): State: TIMED_WAITING Blocked count: 0 Waited count: 646 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 37147): State: TIMED_WAITING Blocked count: 0 Waited count: 654 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 37147): State: TIMED_WAITING Blocked count: 0 Waited count: 655 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 37147): State: TIMED_WAITING Blocked count: 0 
Waited count: 659 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 37147): State: TIMED_WAITING Blocked count: 0 Waited count: 666 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Client (294847031) connection to localhost/127.0.0.1:45819 from jenkins): State: TIMED_WAITING Blocked count: 1502 Waited count: 1503 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 121 (qtp1224444869-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1224444869-122-acceptor-0@41a524a5-ServerConnector@600292e3{HTTP/1.1, (http/1.1)}{localhost:40739}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Parameter Sending Thread for localhost/127.0.0.1:45819): State: TIMED_WAITING Blocked count: 0 Waited count: 2295 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1224444869-123): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1224444869-124): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-42ed3c18-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3a4628a4): State: TIMED_WAITING Blocked count: 8 Waited count: 1267 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 35267): State: TIMED_WAITING Blocked count: 1 Waited count: 65 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 127 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 385 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@67f1acae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-67561561-172.17.0.2-1733750962175 heartbeating to localhost/127.0.0.1:45819): State: TIMED_WAITING Blocked count: 1443 Waited count: 1645 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7d8e926b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) 
app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 645 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 637 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 642 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 645 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 648 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1562542143-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1562542143-154-acceptor-0@4b562903-ServerConnector@5efaea97{HTTP/1.1, (http/1.1)}{localhost:40615}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1562542143-155): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1562542143-156): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-5d4c4796-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7b2cefe2): State: TIMED_WAITING Blocked count: 0 Waited count: 1269 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 42289): State: TIMED_WAITING Blocked count: 1 Waited count: 65 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 127 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 340 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4153b16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-67561561-172.17.0.2-1733750962175 heartbeating to localhost/127.0.0.1:45819): State: TIMED_WAITING Blocked count: 1420 Waited count: 1638 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@66649d3f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 42289): State: TIMED_WAITING 
Blocked count: 0 Waited count: 637 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 42289): State: TIMED_WAITING Blocked count: 0 Waited count: 635 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 42289): State: TIMED_WAITING Blocked count: 0 Waited count: 642 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 42289): State: TIMED_WAITING Blocked count: 0 Waited count: 636 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 42289): State: TIMED_WAITING Blocked count: 0 Waited count: 642 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data3)): State: TIMED_WAITING Blocked count: 18 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data1)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data4)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data2)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data4/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data3/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data2/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data1/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited 
count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6be83779 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32b26245 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (java.util.concurrent.ThreadPoolExecutor$Worker@42cfa18d[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (java.util.concurrent.ThreadPoolExecutor$Worker@171fd18f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data6/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data5/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e3ecfad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (java.util.concurrent.ThreadPoolExecutor$Worker@10ca87e5[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57683): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 237 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 241 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 317 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 242 (SyncThread:0): State: WAITING Blocked count: 18 Waited count: 376 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15bd722e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 243 (ProcessThread(sid:0 cport:57683):): State: WAITING Blocked count: 4 Waited count: 502 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2ad84141 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 244 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 528 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7243b010 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 245 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7a5b3498 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 644 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 47 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (Time-limited test-SendThread(127.0.0.1:57683)): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 260 (Time-limited test-EventThread): State: WAITING Blocked count: 16 Waited count: 63 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d4d51a7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 261 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 
(NIOWorkerThread-3): State: WAITING Blocked count: 7 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (zk-event-processor-pool-0): State: WAITING Blocked count: 26 Waited count: 81 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31d9b499 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-9): State: WAITING Blocked count: 5 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-10): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1205b9ce Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049): State: WAITING Blocked count: 98 Waited count: 457 Waiting on java.util.concurrent.Semaphore$NonfairSync@dfdb43d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049): State: WAITING Blocked count: 146 Waited count: 546 Waiting on java.util.concurrent.Semaphore$NonfairSync@1aaa0c4c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45049): State: WAITING Blocked count: 42 Waited count: 9206 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@662b0efc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45049): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@59df103e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@59df103e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@39e30b64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7361b878 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4d831406 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.Semaphore$NonfairSync@6cfca368 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 290 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4de53db8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) 
app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 291 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 110 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 288 (M:0;c48e4b8eb196:45049): State: TIMED_WAITING Blocked count: 12 Waited count: 3914 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1112/0x00007facf8f966b8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 358 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/c48e4b8eb196:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/c48e4b8eb196:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (org.apache.hadoop.hdfs.PeerCache@30fec42e): State: TIMED_WAITING Blocked count: 0 Waited count: 211 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6282 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 399 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 104 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 102 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 165 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b4d1fb2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 63 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 62911 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 442 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 27 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 444 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 38 Waited count: 0 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 457 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ed4d5d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 482 (regionserver/c48e4b8eb196:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@e2d3eee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/c48e4b8eb196:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6dfe0016 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 484 (regionserver/c48e4b8eb196:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e7d3c29 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 528 (region-location-0): State: WAITING Blocked count: 15 Waited count: 22 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 62749 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 585 (region-location-1): State: WAITING Blocked count: 4 Waited count: 9 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 586 (region-location-2): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 989 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 760 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1051 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1091 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 71 Waited count: 120 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5bba7b15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1139 
(RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1140 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1201 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1202 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1203 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1204 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1223 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1256 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1257 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1258 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1260 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1261 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1617 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 48 Waiting on java.util.TaskQueue@3758fe49 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1976 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1977 (region-location-4): State: WAITING Blocked count: 4 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2966 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10294 
(AsyncFSWAL-1-hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/MasterData-prefix:c48e4b8eb196,45049,1733750967073): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@503fa63e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10298 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10303 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-09T13:40:26,026 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T13:40:56,026 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
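The FsDatasetAsyncDiskServiceFixer DEBUG lines above report a NoSuchFieldException for a field named threadGroup, which the message itself ties to newer Hadoop releases (see HBASE-27595). As a rough illustration only, and not the actual HBaseTestingUtil code, a reflective lookup of a private field fails in exactly this way once the field has been removed from the class on the classpath; the class name used below is just a stand-in.

import java.lang.reflect.Field;

// Minimal sketch: why a reflective lookup of a no-longer-declared field
// surfaces as NoSuchFieldException. "threadGroup" is the field name from
// the log above; the target class here is only a placeholder.
public class ReflectiveFieldLookupSketch {
    static Field findThreadGroupField(Class<?> target) {
        try {
            Field f = target.getDeclaredField("threadGroup"); // absent in newer Hadoop per HBASE-27595
            f.setAccessible(true);
            return f;
        } catch (NoSuchFieldException e) {
            // This branch corresponds to the DEBUG message in the log:
            // the fixer notes the missing field and skips its workaround.
            System.out.println("NoSuchFieldException: " + e.getMessage());
            return null;
        }
    }

    public static void main(String[] args) {
        // String.class is used as a stand-in target; it declares no
        // "threadGroup" field, so this exercises the exception branch.
        findThreadGroupField(String.class);
    }
}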
2024-12-09T13:40:59,021 DEBUG [M:0;c48e4b8eb196:45049 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733751359009Disabling compacts and flushes for region at 1733751359009Disabling writes for close at 1733751359019 (+10 ms)Obtaining lock to block concurrent updates at 1733751359020 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733751359020Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=1025265, getHeapSize=1231192, getOffHeapSize=0, getCellsCount=2715 at 1733751359020Failed flush master:store,,1.1595e783b53d99cd5eef43b6debb2682., putting online again at 1733751659020 (+300000 ms)
2024-12-09T13:40:59,021 WARN [M:0;c48e4b8eb196:45049 {}] region.MasterRegion(134): Failed to close region
org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4654, WAL system stuck?
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4654, WAL system stuck?
    at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ~[classes/:?]
    ... 19 more
2024-12-09T13:40:59,022 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover
java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:391) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:247) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:203) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:240) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:162) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T13:40:59,023 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils
2024-12-09T13:40:59,023 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease()
2024-12-09T13:40:59,023 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/MasterData/WALs/c48e4b8eb196,45049,1733750967073/c48e4b8eb196%2C45049%2C1733750967073.1733750968752
2024-12-09T13:40:59,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/MasterData/WALs/c48e4b8eb196,45049,1733750967073/c48e4b8eb196%2C45049%2C1733750967073.1733750968752 after 1ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T13:40:59,026 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.InterruptedIOException: Operation cancelled
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T13:40:59,026 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/MasterData/WALs/c48e4b8eb196,45049,1733750967073/c48e4b8eb196%2C45049%2C1733750967073.1733750968752
2024-12-09T13:40:59,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/MasterData/WALs/c48e4b8eb196,45049,1733750967073/c48e4b8eb196%2C45049%2C1733750967073.1733750968752 after 0ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;c48e4b8eb196:45049 228 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 3 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING 
Blocked count: 42 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@7a370cf7 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 19 Waited count: 25 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@65667fc7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6989 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 71 Waiting on java.util.concurrent.CountDownLatch$Sync@356bbffe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13033 Waited count: 13686 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) 
java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 17 Waited count: 17 Waiting on java.lang.ref.ReferenceQueue$Lock@66f7b3ff Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@30b960a1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@70589220): State: TIMED_WAITING Blocked count: 0 Waited count: 1393 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2080160363-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2080160363-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2080160363-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2080160363-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2080160363-41-acceptor-0@64a549f8-ServerConnector@188d52a2{HTTP/1.1, (http/1.1)}{localhost:34039}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2080160363-42): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
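Each entry in this dump reports a thread's State, Blocked count, Waited count, and stack; the "Time-limited test" thread above is itself sitting in sun.management.ThreadImpl.getThreadInfo while these periodic dumps are produced. As a small, self-contained sketch (not the HBase Threads/ReflectionUtils code), the same fields can be printed with the standard ThreadMXBean API:

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

// Hedged illustration: emit "State / Blocked count / Waited count / stack"
// for every live thread, in the spirit of the dump entries around this point.
public class ThreadDumpSketch {
    public static void main(String[] args) {
        ThreadMXBean mx = ManagementFactory.getThreadMXBean();
        for (ThreadInfo info : mx.getThreadInfo(mx.getAllThreadIds(), Integer.MAX_VALUE)) {
            if (info == null) continue; // thread may have exited since the id snapshot
            System.out.printf("Thread %d (%s):%n  State: %s%n  Blocked count: %d%n  Waited count: %d%n",
                info.getThreadId(), info.getThreadName(), info.getThreadState(),
                info.getBlockedCount(), info.getWaitedCount());
            for (StackTraceElement frame : info.getStackTrace()) {
                System.out.println("    " + frame);
            }
        }
    }
}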
Thread 43 (qtp2080160363-43): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2080160363-44): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-543f2336-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 29 Waited count: 3252 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60ef6b7b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 45819): State: TIMED_WAITING Blocked count: 1 Waited count: 71 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3019d844): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 232 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@36b49887): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 233 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 68680 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1347 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@142993b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC 
Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 45819): State: TIMED_WAITING Blocked count: 117 Waited count: 2744 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 45819): State: TIMED_WAITING Blocked count: 130 Waited count: 2736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 45819): State: TIMED_WAITING Blocked count: 123 Waited count: 2727 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 45819): State: TIMED_WAITING Blocked count: 119 Waited count: 2723 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 45819): State: TIMED_WAITING Blocked count: 132 Waited count: 2733 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@e3b949c): State: TIMED_WAITING Blocked count: 0 Waited count: 348 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2661f6c3): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@43480010): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@dd757eb): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(826874655)): State: TIMED_WAITING Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1684352221-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1684352221-88-acceptor-0@348693b2-ServerConnector@7ea2aa50{HTTP/1.1, (http/1.1)}{localhost:43791}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1684352221-89): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 
90 (qtp1684352221-90): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-2e157a8e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@456f4ad1): State: TIMED_WAITING Blocked count: 0 Waited count: 1390 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection 
scanner for port 37147): State: TIMED_WAITING Blocked count: 1 Waited count: 71 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 139 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 406 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@35e2dc19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-67561561-172.17.0.2-1733750962175 heartbeating to localhost/127.0.0.1:45819): State: TIMED_WAITING Blocked count: 1510 Waited count: 1692 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@c25da2f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 37147): State: TIMED_WAITING Blocked count: 0 Waited count: 706 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 37147): State: TIMED_WAITING Blocked count: 0 Waited count: 714 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 37147): State: TIMED_WAITING Blocked count: 0 Waited count: 715 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 37147): State: TIMED_WAITING Blocked count: 0 Waited count: 719 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 37147): State: TIMED_WAITING Blocked count: 0 Waited count: 726 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Client (294847031) connection to localhost/127.0.0.1:45819 from jenkins): State: TIMED_WAITING Blocked count: 1562 Waited count: 1563 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 121 (qtp1224444869-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1224444869-122-acceptor-0@41a524a5-ServerConnector@600292e3{HTTP/1.1, (http/1.1)}{localhost:40739}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Parameter Sending Thread for localhost/127.0.0.1:45819): State: TIMED_WAITING Blocked count: 0 Waited count: 2355 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1224444869-123): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1224444869-124): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-42ed3c18-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3a4628a4): State: TIMED_WAITING Blocked count: 8 Waited count: 1387 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 35267): State: TIMED_WAITING Blocked count: 1 Waited count: 71 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) 
java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 139 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 405 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@67f1acae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-67561561-172.17.0.2-1733750962175 heartbeating to localhost/127.0.0.1:45819): State: TIMED_WAITING Blocked count: 1463 Waited count: 1685 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7d8e926b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 705 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 697 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 702 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 705 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 35267): State: TIMED_WAITING Blocked count: 0 Waited count: 708 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1562542143-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007facf842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1562542143-154-acceptor-0@4b562903-ServerConnector@5efaea97{HTTP/1.1, (http/1.1)}{localhost:40615}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1562542143-155): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1562542143-156): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-5d4c4796-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7b2cefe2): State: TIMED_WAITING Blocked count: 0 Waited count: 1389 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 42289): State: TIMED_WAITING Blocked count: 1 Waited count: 71 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 139 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 360 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4153b16 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-67561561-172.17.0.2-1733750962175 heartbeating to localhost/127.0.0.1:45819): State: TIMED_WAITING Blocked count: 1440 Waited count: 1678 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@66649d3f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 42289): State: TIMED_WAITING Blocked count: 0 Waited count: 697 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 42289): State: TIMED_WAITING Blocked count: 0 Waited count: 695 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 42289): State: TIMED_WAITING Blocked count: 0 Waited count: 702 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 42289): State: TIMED_WAITING Blocked count: 0 Waited count: 696 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 42289): State: TIMED_WAITING Blocked count: 0 Waited count: 702 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data3)): State: TIMED_WAITING Blocked count: 18 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data1)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data4)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data2)): State: TIMED_WAITING Blocked count: 16 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data4/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data3/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data2/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data1/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 213 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6be83779 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32b26245 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (java.util.concurrent.ThreadPoolExecutor$Worker@42cfa18d[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited 
count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (java.util.concurrent.ThreadPoolExecutor$Worker@171fd18f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data6/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data5/current/BP-67561561-172.17.0.2-1733750962175): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e3ecfad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (java.util.concurrent.ThreadPoolExecutor$Worker@10ca87e5[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57683): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 237 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 70 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 241 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 347 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 242 (SyncThread:0): State: WAITING Blocked count: 18 Waited count: 381 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15bd722e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 243 (ProcessThread(sid:0 cport:57683):): State: WAITING Blocked count: 4 Waited count: 507 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2ad84141 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 244 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 533 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7243b010 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 245 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 103 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7a5b3498 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 692 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 47 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (Time-limited test-SendThread(127.0.0.1:57683)): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 260 (Time-limited test-EventThread): State: WAITING Blocked count: 16 Waited count: 63 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d4d51a7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 261 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-3): State: WAITING Blocked count: 7 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (zk-event-processor-pool-0): State: WAITING Blocked count: 26 Waited count: 81 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@31d9b499 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-9): State: WAITING Blocked count: 5 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-10): State: WAITING Blocked count: 3 Waited count: 101 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 
(NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4090a35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1205b9ce Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45049): State: WAITING Blocked count: 98 Waited count: 457 Waiting on java.util.concurrent.Semaphore$NonfairSync@dfdb43d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45049): State: WAITING Blocked count: 146 Waited count: 546 Waiting on java.util.concurrent.Semaphore$NonfairSync@1aaa0c4c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45049): State: WAITING Blocked count: 42 Waited count: 9206 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@662b0efc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45049): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@59df103e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@59df103e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 
(RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@39e30b64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7361b878 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4d831406 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=45049): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.Semaphore$NonfairSync@6cfca368 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 290 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4de53db8 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 291 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 335 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 110 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 288 (M:0;c48e4b8eb196:45049): State: TIMED_WAITING Blocked count: 12 Waited count: 3915 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162) app//org.apache.hadoop.hbase.wal.AbstractWALProvider$$Lambda$1450/0x00007facf9239eb8.run(Unknown Source) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 70 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/c48e4b8eb196:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/c48e4b8eb196:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (org.apache.hadoop.hdfs.PeerCache@30fec42e): State: TIMED_WAITING Blocked count: 0 Waited count: 231 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6882 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 399 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 104 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 102 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 165 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b4d1fb2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 69 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 68911 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 442 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 27 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 444 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 38 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 457 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ed4d5d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 482 (regionserver/c48e4b8eb196:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@e2d3eee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/c48e4b8eb196:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6dfe0016 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 484 (regionserver/c48e4b8eb196:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e7d3c29 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 528 (region-location-0): State: WAITING Blocked count: 15 Waited count: 22 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 68749 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 534 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 585 (region-location-1): State: WAITING Blocked count: 4 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 586 (region-location-2): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 989 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 766 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1051 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1091 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 71 Waited count: 120 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5bba7b15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1139 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1140 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1201 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1202 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1203 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1204 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1223 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1256 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1257 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1258 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1260 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1261 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1617 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 48 Waiting on java.util.TaskQueue@3758fe49 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1976 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1977 (region-location-4): State: WAITING Blocked count: 4 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@164f582b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2966 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 
10294 (AsyncFSWAL-1-hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/MasterData-prefix:c48e4b8eb196,45049,1733750967073): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@503fa63e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10303 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 10306 (WAL-Shutdown-0): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doShutdown(AbstractFSWAL.java:2117) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1179) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1174) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10307 (Close-WAL-Writer-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1437/0x00007facf9235338.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-12-09T13:41:03,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/MasterData/WALs/c48e4b8eb196,45049,1733750967073/c48e4b8eb196%2C45049%2C1733750967073.1733750968752 after 4000ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T13:41:04,022 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds" 2024-12-09T13:41:04,023 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T13:41:04,023 INFO [M:0;c48e4b8eb196:45049 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
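The Close-WAL-Writer-0 thread and the RecoverLeaseFSUtils WARN above show a lease-recovery retry loop failing with "IOException: Filesystem closed" because the shared DFSClient was shut down before the WAL writer finished closing. As a reading aid, the following is a minimal sketch of the recover-then-poll pattern those stack frames correspond to, written directly against the public HDFS client API rather than HBase's reflective RecoverLeaseFSUtils; the class name, file URI handling, and retry budget are illustrative assumptions, not taken from this run.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      // Ask the NameNode to recover the lease on a file, then poll until HDFS
      // reports the file closed. The retry budget and sleep are illustrative.
      static boolean recoverAndWait(Configuration conf, String fileUri) throws Exception {
        Path path = new Path(fileUri);
        FileSystem fs = FileSystem.get(URI.create(fileUri), conf);
        if (!(fs instanceof DistributedFileSystem)) {
          return true; // nothing to recover on a non-HDFS filesystem
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        if (dfs.recoverLease(path)) {
          return true; // file is already closed
        }
        for (int attempt = 0; attempt < 15; attempt++) {
          Thread.sleep(1000L);
          // isFileClosed (like recoverLease above) throws "IOException: Filesystem
          // closed" if the shared DFSClient has already been shut down, which is
          // exactly the failure logged for Close-WAL-Writer-0 in this run.
          if (dfs.isFileClosed(path)) {
            return true;
          }
        }
        return false;
      }
    }

In this run the recovery could not succeed at all: once the filesystem was closed, every recoverLease/isFileClosed call failed immediately, so the loop only logged the repeated WARN entries seen above.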
2024-12-09T13:41:04,023 INFO [M:0;c48e4b8eb196:45049 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45049 2024-12-09T13:41:04,023 INFO [M:0;c48e4b8eb196:45049 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T13:41:04,028 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:45819/user/jenkins/test-data/6deec529-ac86-7ee8-40f4-7f7f27e374c7/MasterData/WALs/c48e4b8eb196,45049,1733750967073/c48e4b8eb196%2C45049%2C1733750967073.1733750968752 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
12 more 2024-12-09T13:41:04,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T13:41:04,181 INFO [M:0;c48e4b8eb196:45049 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T13:41:04,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45049-0x1000ad205eb0000, quorum=127.0.0.1:57683, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T13:41:04,184 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@474673d3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T13:41:04,185 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5efaea97{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T13:41:04,185 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T13:41:04,185 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@108f4b55{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T13:41:04,185 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2de28195{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop.log.dir/,STOPPED} 2024-12-09T13:41:04,187 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
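The earlier ERROR from wal.AbstractFSWAL ("We have waited 5 seconds but the close of async writer doesn't complete...") names the configuration key hbase.wal.async.wait.on.shutdown.seconds as the knob for that wait. A minimal sketch of how a test might raise it, assuming the returned Configuration is the one handed to the mini cluster (or mirrored in hbase-site.xml) before startup; the 30-second value and the class/method names are illustrative, not taken from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalShutdownWaitSketch {
      public static Configuration withLongerWalShutdownWait() {
        // Start from the usual defaults / hbase-site.xml.
        Configuration conf = HBaseConfiguration.create();
        // Give the async WAL writer more time to close during shutdown than the
        // 5 seconds seen in the ERROR above. 30 is an illustrative value only.
        conf.setInt("hbase.wal.async.wait.on.shutdown.seconds", 30);
        return conf;
      }
    }

A longer wait would only help if the underlying filesystem were still reachable; it would not change the "Filesystem closed" failures above, where the DFSClient had already been torn down.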
2024-12-09T13:41:04,187 WARN [BP-67561561-172.17.0.2-1733750962175 heartbeating to localhost/127.0.0.1:45819 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T13:41:04,187 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T13:41:04,187 WARN [BP-67561561-172.17.0.2-1733750962175 heartbeating to localhost/127.0.0.1:45819 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-67561561-172.17.0.2-1733750962175 (Datanode Uuid cd327201-52fd-46ac-a68b-2f1a18c83819) service to localhost/127.0.0.1:45819 2024-12-09T13:41:04,189 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data6/current/BP-67561561-172.17.0.2-1733750962175 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T13:41:04,190 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T13:41:04,191 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data5/current/BP-67561561-172.17.0.2-1733750962175 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T13:41:04,198 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@8fd4906{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T13:41:04,199 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@600292e3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T13:41:04,199 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T13:41:04,199 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1de9333b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T13:41:04,199 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32a76e2d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop.log.dir/,STOPPED} 2024-12-09T13:41:04,201 WARN [BP-67561561-172.17.0.2-1733750962175 heartbeating to localhost/127.0.0.1:45819 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T13:41:04,201 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T13:41:04,201 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T13:41:04,201 WARN [BP-67561561-172.17.0.2-1733750962175 heartbeating to localhost/127.0.0.1:45819 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-67561561-172.17.0.2-1733750962175 (Datanode Uuid 18a88ea6-5588-4239-b493-08d886227bd8) service to localhost/127.0.0.1:45819 2024-12-09T13:41:04,202 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data3/current/BP-67561561-172.17.0.2-1733750962175 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T13:41:04,202 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data4/current/BP-67561561-172.17.0.2-1733750962175 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T13:41:04,202 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T13:41:04,208 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@74bb782c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T13:41:04,208 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7ea2aa50{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T13:41:04,208 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T13:41:04,208 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3622d218{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T13:41:04,208 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4521559e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop.log.dir/,STOPPED} 2024-12-09T13:41:04,210 WARN [BP-67561561-172.17.0.2-1733750962175 heartbeating to localhost/127.0.0.1:45819 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T13:41:04,210 WARN [BP-67561561-172.17.0.2-1733750962175 heartbeating to localhost/127.0.0.1:45819 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-67561561-172.17.0.2-1733750962175 (Datanode Uuid b6b1635a-69ea-425d-be53-eee8bef8009a) service to localhost/127.0.0.1:45819 2024-12-09T13:41:04,210 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T13:41:04,210 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T13:41:04,211 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data2/current/BP-67561561-172.17.0.2-1733750962175 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T13:41:04,211 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/cluster_e0f8f88f-db72-959b-fb08-f06ad8e4a21c/data/data1/current/BP-67561561-172.17.0.2-1733750962175 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T13:41:04,211 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T13:41:04,235 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@12351f7e{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T13:41:04,236 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@188d52a2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T13:41:04,236 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T13:41:04,236 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5cd6ab6b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T13:41:04,236 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@654c02d1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master@2/hbase-mapreduce/target/test-data/6f80bb75-6f61-dca9-e971-f65185ebe7a4/hadoop.log.dir/,STOPPED} 2024-12-09T13:41:04,252 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T13:41:04,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
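For reference, the per-thread blocks that dominate the earlier part of this log (thread name, State, Blocked count, Waited count, Stack) carry the same fields that java.lang.management exposes through ThreadInfo. The sketch below shows how such a dump can be produced from ThreadMXBean; it is a generic illustration under that assumption, not the specific listener this test harness used, and the class and method names are made up.

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadInfo;
    import java.lang.management.ThreadMXBean;

    public class ThreadDumpSketch {
      public static void printDump() {
        ThreadMXBean mx = ManagementFactory.getThreadMXBean();
        // dumpAllThreads(lockedMonitors, lockedSynchronizers): full stacks for all live threads.
        for (ThreadInfo info : mx.dumpAllThreads(false, false)) {
          System.out.printf("Thread %d (%s):%n", info.getThreadId(), info.getThreadName());
          System.out.println("  State: " + info.getThreadState());
          System.out.println("  Blocked count: " + info.getBlockedCount());
          System.out.println("  Waited count: " + info.getWaitedCount());
          System.out.println("  Stack:");
          for (StackTraceElement frame : info.getStackTrace()) {
            System.out.println("    " + frame);
          }
        }
      }
    }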