2024-11-13 16:31:55,003 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad 2024-11-13 16:31:55,031 main DEBUG Took 0.025132 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-13 16:31:55,032 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-13 16:31:55,033 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-13 16:31:55,035 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-13 16:31:55,038 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 16:31:55,057 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-13 16:31:55,091 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 16:31:55,097 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 16:31:55,098 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 16:31:55,100 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 16:31:55,106 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 16:31:55,106 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 16:31:55,111 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 16:31:55,113 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 16:31:55,114 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 16:31:55,116 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 16:31:55,118 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 16:31:55,118 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 16:31:55,120 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 16:31:55,121 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-13 16:31:55,123 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 16:31:55,124 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 16:31:55,125 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 16:31:55,127 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 16:31:55,129 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 16:31:55,130 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 16:31:55,131 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 16:31:55,133 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 16:31:55,135 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 16:31:55,140 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 16:31:55,142 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 16:31:55,143 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-13 16:31:55,165 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 16:31:55,167 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-13 16:31:55,170 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-13 16:31:55,183 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-13 16:31:55,185 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-13 16:31:55,185 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-13 16:31:55,200 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-13 16:31:55,203 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-13 16:31:55,210 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-13 16:31:55,211 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-13 16:31:55,212 main DEBUG createAppenders(={Console}) 2024-11-13 16:31:55,213 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad initialized 2024-11-13 16:31:55,216 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad 2024-11-13 16:31:55,217 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad OK. 2024-11-13 16:31:55,218 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-13 16:31:55,218 main DEBUG OutputStream closed 2024-11-13 16:31:55,219 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-13 16:31:55,219 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-13 16:31:55,221 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@7c711375 OK 2024-11-13 16:31:55,454 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-13 16:31:55,459 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-13 16:31:55,461 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-13 16:31:55,463 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-13 16:31:55,464 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-13 16:31:55,464 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-13 16:31:55,465 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-13 16:31:55,466 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-13 16:31:55,466 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-13 16:31:55,484 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-13 16:31:55,484 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-13 16:31:55,485 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-13 16:31:55,491 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-13 16:31:55,492 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-13 16:31:55,492 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-13 16:31:55,493 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-13 16:31:55,494 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-13 16:31:55,496 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-13 16:31:55,518 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-13 16:31:55,519 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@554e218) with optional ClassLoader: null 2024-11-13 16:31:55,520 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-13 16:31:55,521 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@554e218] started OK. 2024-11-13T16:31:55,561 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-11-13 16:31:55,583 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-13 16:31:55,584 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-13T16:31:56,424 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c 2024-11-13T16:31:56,425 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobExportSnapshot timeout: 13 mins 2024-11-13T16:31:56,436 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobSecureExportSnapshot timeout: 13 mins 2024-11-13T16:31:56,553 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-11-13T16:31:56,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T16:31:56,988 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8, deleteOnExit=true 2024-11-13T16:31:56,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T16:31:56,991 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/test.cache.data in system properties and HBase conf 2024-11-13T16:31:56,993 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T16:31:56,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop.log.dir in system properties and HBase conf 2024-11-13T16:31:56,996 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T16:31:56,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T16:31:56,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T16:31:57,265 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-13T16:31:57,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T16:31:57,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T16:31:57,283 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T16:31:57,284 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T16:31:57,286 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T16:31:57,299 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T16:31:57,300 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T16:31:57,301 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T16:31:57,302 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T16:31:57,304 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/nfs.dump.dir in system properties and HBase conf 2024-11-13T16:31:57,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/java.io.tmpdir in system properties and HBase conf 2024-11-13T16:31:57,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T16:31:57,307 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T16:31:57,310 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T16:32:00,134 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-13T16:32:00,361 INFO [Time-limited test {}] log.Log(170): Logging initialized @6968ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-13T16:32:00,718 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T16:32:01,016 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T16:32:01,204 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T16:32:01,204 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T16:32:01,207 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T16:32:01,276 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T16:32:01,295 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a95d1d1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop.log.dir/,AVAILABLE} 2024-11-13T16:32:01,304 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2659399f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T16:32:01,713 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f111d3b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/java.io.tmpdir/jetty-localhost-42775-hadoop-hdfs-3_4_1-tests_jar-_-any-4740851655539251932/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T16:32:01,769 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@350e1e3e{HTTP/1.1, (http/1.1)}{localhost:42775} 2024-11-13T16:32:01,770 INFO [Time-limited test {}] server.Server(415): Started @8378ms 2024-11-13T16:32:02,645 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T16:32:02,656 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T16:32:02,659 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T16:32:02,660 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T16:32:02,660 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T16:32:02,668 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@48d007cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop.log.dir/,AVAILABLE} 2024-11-13T16:32:02,669 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c56f4f6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T16:32:02,827 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@75d3cbc1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/java.io.tmpdir/jetty-localhost-34985-hadoop-hdfs-3_4_1-tests_jar-_-any-3336440269635862419/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T16:32:02,850 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@117de549{HTTP/1.1, (http/1.1)}{localhost:34985} 2024-11-13T16:32:02,851 INFO [Time-limited test {}] server.Server(415): Started @9460ms 2024-11-13T16:32:02,958 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T16:32:03,341 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T16:32:03,374 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T16:32:03,385 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T16:32:03,385 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T16:32:03,385 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T16:32:03,390 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4628bbe8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop.log.dir/,AVAILABLE} 2024-11-13T16:32:03,391 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39ddae33{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T16:32:03,544 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@68f857ff{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/java.io.tmpdir/jetty-localhost-36973-hadoop-hdfs-3_4_1-tests_jar-_-any-12031521967631821505/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T16:32:03,547 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2ec4654c{HTTP/1.1, (http/1.1)}{localhost:36973} 2024-11-13T16:32:03,547 INFO [Time-limited test {}] server.Server(415): Started @10156ms 2024-11-13T16:32:03,554 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T16:32:03,710 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T16:32:03,717 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T16:32:03,719 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T16:32:03,719 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T16:32:03,720 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T16:32:03,723 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78f1b706{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop.log.dir/,AVAILABLE} 2024-11-13T16:32:03,723 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@d51af8d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T16:32:03,858 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1621d99c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/java.io.tmpdir/jetty-localhost-33471-hadoop-hdfs-3_4_1-tests_jar-_-any-10596397101983606317/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T16:32:03,860 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@35c67e1f{HTTP/1.1, (http/1.1)}{localhost:33471} 2024-11-13T16:32:03,860 INFO [Time-limited test {}] server.Server(415): Started @10469ms 2024-11-13T16:32:03,865 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-13T16:32:05,274 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data2/current/BP-1731676653-172.17.0.3-1731515519385/current, will proceed with Du for space computation calculation, 2024-11-13T16:32:05,274 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data1/current/BP-1731676653-172.17.0.3-1731515519385/current, will proceed with Du for space computation calculation, 2024-11-13T16:32:05,275 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data5/current/BP-1731676653-172.17.0.3-1731515519385/current, will proceed with Du for space computation calculation, 2024-11-13T16:32:05,275 WARN [Thread-127 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data6/current/BP-1731676653-172.17.0.3-1731515519385/current, will proceed with Du for space computation calculation, 2024-11-13T16:32:05,300 WARN [Thread-128 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data3/current/BP-1731676653-172.17.0.3-1731515519385/current, will proceed with Du for space computation calculation, 2024-11-13T16:32:05,307 WARN [Thread-129 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data4/current/BP-1731676653-172.17.0.3-1731515519385/current, will proceed with Du for space computation calculation, 2024-11-13T16:32:05,627 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T16:32:05,708 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T16:32:05,714 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T16:32:05,861 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x22f0a61e646102ed with lease ID 0xa3bf281a33505d18: Processing first storage report for DS-32a1c586-35ae-429b-b238-eeb62a943751 from datanode DatanodeRegistration(127.0.0.1:43257, datanodeUuid=01707b01-a2f9-4c88-9327-5f56d6c85f89, infoPort=40831, infoSecurePort=0, ipcPort=42231, storageInfo=lv=-57;cid=testClusterID;nsid=473086669;c=1731515519385) 2024-11-13T16:32:05,863 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x22f0a61e646102ed with lease ID 0xa3bf281a33505d18: from storage DS-32a1c586-35ae-429b-b238-eeb62a943751 node DatanodeRegistration(127.0.0.1:43257, datanodeUuid=01707b01-a2f9-4c88-9327-5f56d6c85f89, infoPort=40831, infoSecurePort=0, ipcPort=42231, storageInfo=lv=-57;cid=testClusterID;nsid=473086669;c=1731515519385), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-13T16:32:05,863 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x64183ac1659098de with lease ID 0xa3bf281a33505d1a: Processing first storage report for DS-b7f07e8f-e796-4039-9dba-04979c32c609 from datanode DatanodeRegistration(127.0.0.1:46053, datanodeUuid=05a333e2-dd09-41c1-aa27-69e7d73515c0, infoPort=34817, infoSecurePort=0, ipcPort=36869, storageInfo=lv=-57;cid=testClusterID;nsid=473086669;c=1731515519385) 2024-11-13T16:32:05,864 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x64183ac1659098de with lease ID 0xa3bf281a33505d1a: from storage DS-b7f07e8f-e796-4039-9dba-04979c32c609 node DatanodeRegistration(127.0.0.1:46053, datanodeUuid=05a333e2-dd09-41c1-aa27-69e7d73515c0, infoPort=34817, infoSecurePort=0, ipcPort=36869, storageInfo=lv=-57;cid=testClusterID;nsid=473086669;c=1731515519385), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T16:32:05,864 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x22f0a61e646102ed with lease ID 0xa3bf281a33505d18: Processing first storage report for DS-374dcfa0-64f3-4a0a-996d-54f688dc8a74 from datanode DatanodeRegistration(127.0.0.1:43257, datanodeUuid=01707b01-a2f9-4c88-9327-5f56d6c85f89, infoPort=40831, infoSecurePort=0, ipcPort=42231, storageInfo=lv=-57;cid=testClusterID;nsid=473086669;c=1731515519385) 2024-11-13T16:32:05,864 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x22f0a61e646102ed with lease ID 0xa3bf281a33505d18: from storage DS-374dcfa0-64f3-4a0a-996d-54f688dc8a74 node DatanodeRegistration(127.0.0.1:43257, datanodeUuid=01707b01-a2f9-4c88-9327-5f56d6c85f89, infoPort=40831, infoSecurePort=0, ipcPort=42231, storageInfo=lv=-57;cid=testClusterID;nsid=473086669;c=1731515519385), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T16:32:05,864 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x64183ac1659098de with lease ID 0xa3bf281a33505d1a: Processing first storage report for DS-82760825-2ad7-4bb5-ad93-80b877275982 from datanode DatanodeRegistration(127.0.0.1:46053, datanodeUuid=05a333e2-dd09-41c1-aa27-69e7d73515c0, infoPort=34817, infoSecurePort=0, ipcPort=36869, storageInfo=lv=-57;cid=testClusterID;nsid=473086669;c=1731515519385) 2024-11-13T16:32:05,865 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x64183ac1659098de with lease ID 0xa3bf281a33505d1a: from storage DS-82760825-2ad7-4bb5-ad93-80b877275982 node DatanodeRegistration(127.0.0.1:46053, datanodeUuid=05a333e2-dd09-41c1-aa27-69e7d73515c0, infoPort=34817, infoSecurePort=0, ipcPort=36869, storageInfo=lv=-57;cid=testClusterID;nsid=473086669;c=1731515519385), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T16:32:05,875 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc09c7007b6c9f05c with lease ID 0xa3bf281a33505d19: Processing first storage report for DS-ccd91292-b849-4472-a6b5-6c3406b0f09b from datanode DatanodeRegistration(127.0.0.1:33001, datanodeUuid=1dc0131a-56f5-444b-8ad1-45336a30e1cb, infoPort=36461, infoSecurePort=0, ipcPort=42229, storageInfo=lv=-57;cid=testClusterID;nsid=473086669;c=1731515519385) 2024-11-13T16:32:05,875 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc09c7007b6c9f05c with lease ID 0xa3bf281a33505d19: from storage DS-ccd91292-b849-4472-a6b5-6c3406b0f09b node DatanodeRegistration(127.0.0.1:33001, datanodeUuid=1dc0131a-56f5-444b-8ad1-45336a30e1cb, infoPort=36461, infoSecurePort=0, ipcPort=42229, storageInfo=lv=-57;cid=testClusterID;nsid=473086669;c=1731515519385), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-13T16:32:05,876 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc09c7007b6c9f05c with lease ID 0xa3bf281a33505d19: Processing first storage report for DS-1a990bb6-802f-48a5-802e-be2e4770e41f from datanode DatanodeRegistration(127.0.0.1:33001, datanodeUuid=1dc0131a-56f5-444b-8ad1-45336a30e1cb, infoPort=36461, infoSecurePort=0, ipcPort=42229, storageInfo=lv=-57;cid=testClusterID;nsid=473086669;c=1731515519385) 2024-11-13T16:32:05,876 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc09c7007b6c9f05c with lease ID 0xa3bf281a33505d19: from storage DS-1a990bb6-802f-48a5-802e-be2e4770e41f node DatanodeRegistration(127.0.0.1:33001, datanodeUuid=1dc0131a-56f5-444b-8ad1-45336a30e1cb, infoPort=36461, infoSecurePort=0, ipcPort=42229, storageInfo=lv=-57;cid=testClusterID;nsid=473086669;c=1731515519385), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T16:32:05,891 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c 2024-11-13T16:32:06,154 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/zookeeper_0, clientPort=58220, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T16:32:06,187 INFO 
[Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58220 2024-11-13T16:32:06,205 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T16:32:06,209 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T16:32:06,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741825_1001 (size=7) 2024-11-13T16:32:06,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741825_1001 (size=7) 2024-11-13T16:32:06,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741825_1001 (size=7) 2024-11-13T16:32:06,758 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce with version=8 2024-11-13T16:32:06,759 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/hbase-staging 2024-11-13T16:32:06,956 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-13T16:32:07,304 INFO [Time-limited test {}] client.ConnectionUtils(128): master/f8f85b0bb1d8:0 server-side Connection retries=45 2024-11-13T16:32:07,318 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T16:32:07,319 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T16:32:07,328 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T16:32:07,329 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T16:32:07,329 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T16:32:07,632 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T16:32:07,724 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-13T16:32:07,737 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-13T16:32:07,741 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator 
for buffer allocation 2024-11-13T16:32:07,781 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 78946 (auto-detected) 2024-11-13T16:32:07,784 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-11-13T16:32:07,839 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:45809 2024-11-13T16:32:07,925 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45809 connecting to ZooKeeper ensemble=127.0.0.1:58220 2024-11-13T16:32:08,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:458090x0, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T16:32:08,085 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45809-0x1013637f1800000 connected 2024-11-13T16:32:08,321 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T16:32:08,335 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T16:32:08,373 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T16:32:08,381 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce, hbase.cluster.distributed=false 2024-11-13T16:32:08,502 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T16:32:08,560 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45809 2024-11-13T16:32:08,575 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45809 2024-11-13T16:32:08,582 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45809 2024-11-13T16:32:08,595 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45809 2024-11-13T16:32:08,607 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45809 2024-11-13T16:32:08,858 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f8f85b0bb1d8:0 server-side Connection retries=45 2024-11-13T16:32:08,863 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T16:32:08,863 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T16:32:08,864 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 
scanQueues=0 scanHandlers=0 2024-11-13T16:32:08,864 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T16:32:08,864 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T16:32:08,869 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T16:32:08,893 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T16:32:08,903 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38437 2024-11-13T16:32:08,909 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38437 connecting to ZooKeeper ensemble=127.0.0.1:58220 2024-11-13T16:32:08,913 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T16:32:08,927 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T16:32:08,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:384370x0, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T16:32:08,964 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:384370x0, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T16:32:08,971 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T16:32:08,978 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38437-0x1013637f1800001 connected 2024-11-13T16:32:08,991 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-11-13T16:32:09,009 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T16:32:09,024 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T16:32:09,027 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38437 2024-11-13T16:32:09,029 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38437 2024-11-13T16:32:09,042 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38437 2024-11-13T16:32:09,075 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38437 2024-11-13T16:32:09,077 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38437 2024-11-13T16:32:09,106 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f8f85b0bb1d8:0 server-side Connection retries=45 2024-11-13T16:32:09,106 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T16:32:09,112 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T16:32:09,113 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T16:32:09,113 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T16:32:09,115 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T16:32:09,116 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T16:32:09,116 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T16:32:09,118 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:39619 2024-11-13T16:32:09,121 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39619 connecting to ZooKeeper ensemble=127.0.0.1:58220 2024-11-13T16:32:09,124 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T16:32:09,140 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T16:32:09,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:396190x0, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T16:32:09,215 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:396190x0, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T16:32:09,216 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T16:32:09,217 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39619-0x1013637f1800002 connected 2024-11-13T16:32:09,223 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-11-13T16:32:09,232 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T16:32:09,243 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T16:32:09,244 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39619 2024-11-13T16:32:09,245 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39619 2024-11-13T16:32:09,246 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39619 2024-11-13T16:32:09,249 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39619 2024-11-13T16:32:09,250 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39619 2024-11-13T16:32:09,293 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/f8f85b0bb1d8:0 server-side Connection retries=45 2024-11-13T16:32:09,293 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T16:32:09,293 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T16:32:09,293 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T16:32:09,293 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T16:32:09,294 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T16:32:09,294 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T16:32:09,294 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T16:32:09,299 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38523 2024-11-13T16:32:09,302 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38523 connecting to ZooKeeper ensemble=127.0.0.1:58220 2024-11-13T16:32:09,305 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T16:32:09,309 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T16:32:09,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:385230x0, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 
2024-11-13T16:32:09,363 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T16:32:09,363 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38523-0x1013637f1800003 connected 2024-11-13T16:32:09,363 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T16:32:09,364 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-11-13T16:32:09,366 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T16:32:09,369 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T16:32:09,370 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38523 2024-11-13T16:32:09,370 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38523 2024-11-13T16:32:09,374 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38523 2024-11-13T16:32:09,379 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38523 2024-11-13T16:32:09,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38523 2024-11-13T16:32:09,403 DEBUG [M:0;f8f85b0bb1d8:45809 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;f8f85b0bb1d8:45809 2024-11-13T16:32:09,405 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/f8f85b0bb1d8,45809,1731515527071 2024-11-13T16:32:09,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T16:32:09,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T16:32:09,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T16:32:09,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T16:32:09,433 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/f8f85b0bb1d8,45809,1731515527071 2024-11-13T16:32:09,484 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T16:32:09,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:09,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T16:32:09,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:09,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:09,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T16:32:09,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:09,488 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T16:32:09,491 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/f8f85b0bb1d8,45809,1731515527071 from backup master directory 2024-11-13T16:32:09,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T16:32:09,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T16:32:09,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T16:32:09,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/f8f85b0bb1d8,45809,1731515527071 2024-11-13T16:32:09,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T16:32:09,515 WARN 
[master/f8f85b0bb1d8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T16:32:09,515 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=f8f85b0bb1d8,45809,1731515527071 2024-11-13T16:32:09,525 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-13T16:32:09,528 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-13T16:32:09,680 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/hbase.id] with ID: 189d46b3-8410-4d36-80e0-1d6ce58cf995 2024-11-13T16:32:09,680 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.tmp/hbase.id 2024-11-13T16:32:09,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741826_1002 (size=42) 2024-11-13T16:32:09,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741826_1002 (size=42) 2024-11-13T16:32:09,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741826_1002 (size=42) 2024-11-13T16:32:09,786 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.tmp/hbase.id]:[hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/hbase.id] 2024-11-13T16:32:10,002 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T16:32:10,010 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T16:32:10,051 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 37ms. 
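The FSUtils lines above write the cluster ID to a temporary file under .tmp and then move it into its final location, so the hbase.id file only becomes visible once it is fully written. A small sketch of that write-then-rename pattern with the plain Hadoop FileSystem API; the paths and class name below are placeholders, not the HBase helper itself.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdPublishSketch {
  /** Write content to a temp path first, then rename it into place. */
  static void publish(FileSystem fs, Path finalPath, String content) throws IOException {
    Path tmp = new Path(finalPath.getParent(), ".tmp/" + finalPath.getName());
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(content.getBytes(StandardCharsets.UTF_8));
    }
    // The rename is cheap on HDFS and publishes the file in one step.
    if (!fs.rename(tmp, finalPath)) {
      throw new IOException("rename failed: " + tmp + " -> " + finalPath);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // fs.defaultFS would normally point at the test cluster, e.g. hdfs://localhost:<port>.
    try (FileSystem fs = FileSystem.get(conf)) {
      publish(fs, new Path("/tmp/hbase.id"), java.util.UUID.randomUUID().toString());
    }
  }
}
```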
2024-11-13T16:32:10,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:10,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:10,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:10,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:10,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741827_1003 (size=196) 2024-11-13T16:32:10,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741827_1003 (size=196) 2024-11-13T16:32:10,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741827_1003 (size=196) 2024-11-13T16:32:10,206 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T16:32:10,211 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T16:32:10,248 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: 
org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
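The NoSuchMethodException above is expected noise: the SASL helper probes DFSClient via reflection to decide whether the running Hadoop already has the HDFS-12396 API, and falls back to the older code path when the method is absent. A minimal, self-contained sketch of that "probe for an optional method" pattern; the classes and method names used in main are stand-ins, not the actual HBase internals.

```java
import java.lang.reflect.Method;

public class OptionalMethodProbe {
  /** Returns the named method if present on the class, otherwise null. */
  static Method probe(Class<?> clazz, String name, Class<?>... paramTypes) {
    try {
      return clazz.getDeclaredMethod(name, paramTypes);
    } catch (NoSuchMethodException e) {
      // Expected on older versions of the dependency; the caller picks a fallback path.
      return null;
    }
  }

  public static void main(String[] args) {
    // Example: probe String for a method that exists and one that does not.
    System.out.println("isBlank present: " + (probe(String.class, "isBlank") != null));
    System.out.println("fooBar present:  " + (probe(String.class, "fooBar") != null));
  }
}
```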
2024-11-13T16:32:10,267 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T16:32:10,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741828_1004 (size=1189) 2024-11-13T16:32:10,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741828_1004 (size=1189) 2024-11-13T16:32:10,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741828_1004 (size=1189) 2024-11-13T16:32:10,534 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/MasterData/data/master/store 2024-11-13T16:32:10,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741829_1005 (size=34) 2024-11-13T16:32:10,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741829_1005 (size=34) 2024-11-13T16:32:10,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741829_1005 (size=34) 2024-11-13T16:32:10,668 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
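The master:store region above is created from a table descriptor with four column families (info, proc, rs, state), each with its own versions, block size, bloom filter and encoding. A hedged sketch of building a comparable descriptor with the public HBase client API; the table name and the exact attribute values are illustrative, since the real master:store descriptor is assembled internally rather than by user code.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    // 'info' mirrors the logged settings: 3 versions, ROW_INDEX_V1 encoding,
    // ROWCOL bloom filter, in-memory, 8 KB blocks.
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build())
        // 'proc', 'rs' and 'state' keep the defaults shown in the log:
        // 1 version, ROW bloom filter, 64 KB blocks, no encoding.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc")).build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("rs")).build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("state")).build())
        .build();
    System.out.println(td);
  }
}
```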
2024-11-13T16:32:10,672 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:32:10,673 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T16:32:10,674 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T16:32:10,674 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T16:32:10,676 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T16:32:10,677 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T16:32:10,677 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T16:32:10,678 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731515530673Disabling compacts and flushes for region at 1731515530673Disabling writes for close at 1731515530676 (+3 ms)Writing region close event to WAL at 1731515530677 (+1 ms)Closed at 1731515530677 2024-11-13T16:32:10,681 WARN [master/f8f85b0bb1d8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/MasterData/data/master/store/.initializing 2024-11-13T16:32:10,682 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/MasterData/WALs/f8f85b0bb1d8,45809,1731515527071 2024-11-13T16:32:10,707 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-13T16:32:10,760 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f8f85b0bb1d8%2C45809%2C1731515527071, suffix=, logDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/MasterData/WALs/f8f85b0bb1d8,45809,1731515527071, archiveDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/MasterData/oldWALs, maxLogs=10 2024-11-13T16:32:10,807 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/MasterData/WALs/f8f85b0bb1d8,45809,1731515527071/f8f85b0bb1d8%2C45809%2C1731515527071.1731515530769, exclude list is [], retry=0 2024-11-13T16:32:10,854 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:46053,DS-b7f07e8f-e796-4039-9dba-04979c32c609,DISK] 2024-11-13T16:32:10,854 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43257,DS-32a1c586-35ae-429b-b238-eeb62a943751,DISK] 2024-11-13T16:32:10,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33001,DS-ccd91292-b849-4472-a6b5-6c3406b0f09b,DISK] 2024-11-13T16:32:10,859 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-13T16:32:10,933 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/MasterData/WALs/f8f85b0bb1d8,45809,1731515527071/f8f85b0bb1d8%2C45809%2C1731515527071.1731515530769 2024-11-13T16:32:10,948 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36461:36461),(127.0.0.1/127.0.0.1:40831:40831),(127.0.0.1/127.0.0.1:34817:34817)] 2024-11-13T16:32:10,950 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T16:32:10,952 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:32:10,959 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T16:32:10,961 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T16:32:11,031 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T16:32:11,088 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T16:32:11,094 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:32:11,100 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T16:32:11,103 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T16:32:11,110 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T16:32:11,111 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:32:11,112 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:32:11,113 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T16:32:11,122 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T16:32:11,122 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:32:11,128 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:32:11,128 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T16:32:11,140 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T16:32:11,142 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:32:11,148 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:32:11,149 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T16:32:11,172 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T16:32:11,178 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T16:32:11,231 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T16:32:11,232 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T16:32:11,240 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
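The FlushLargeStoresPolicy line above explains where the 32 MB figure comes from: with no hbase.hregion.percolumnfamilyflush.size.lower.bound configured, the per-family lower bound defaults to the region memstore flush size divided by the number of families. For this region that is 134217728 / 4 = 33554432 bytes (32 MB), which matches the flushSizeLowerBound reported a few lines later. A one-line check, purely illustrative:

```java
public class FlushLowerBoundCheck {
  public static void main(String[] args) {
    long memstoreFlushSize = 134_217_728L; // 128 MB region flush size from the log
    int columnFamilies = 4;                // info, proc, rs, state
    // Expected per-family lower bound when no explicit bound is configured.
    System.out.println(memstoreFlushSize / columnFamilies); // 33554432 (32 MB)
  }
}
```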
2024-11-13T16:32:11,263 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T16:32:11,307 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:32:11,311 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69662143, jitterRate=0.03804682195186615}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T16:32:11,339 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731515530982Initializing all the Stores at 1731515530993 (+11 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731515530994 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515530995 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515530995Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515530995Cleaning up temporary data from old regions at 1731515531232 (+237 ms)Region opened successfully at 1731515531339 (+107 ms) 2024-11-13T16:32:11,347 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T16:32:11,443 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48de1d9b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f8f85b0bb1d8/172.17.0.3:0 2024-11-13T16:32:11,507 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-11-13T16:32:11,528 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T16:32:11,528 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T16:32:11,539 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T16:32:11,551 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 11 msec 2024-11-13T16:32:11,559 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 8 msec 2024-11-13T16:32:11,560 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T16:32:11,676 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T16:32:11,705 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T16:32:11,743 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T16:32:11,755 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T16:32:11,771 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T16:32:11,788 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T16:32:11,792 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T16:32:11,811 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T16:32:11,828 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T16:32:11,837 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T16:32:11,855 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T16:32:11,911 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T16:32:11,928 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T16:32:11,943 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T16:32:11,943 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:11,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T16:32:11,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:11,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T16:32:11,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:11,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T16:32:11,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:11,951 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=f8f85b0bb1d8,45809,1731515527071, sessionid=0x1013637f1800000, setting cluster-up flag (Was=false) 2024-11-13T16:32:11,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:11,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:11,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:11,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
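The ZKWatcher traffic above is the standard ZooKeeper watch pattern: each server sets a watch on znodes such as /hbase/master and /hbase/running, even before they exist, and then reacts to the NodeCreated / NodeChildrenChanged / NodeDeleted events as the active master publishes its state. A minimal sketch with the plain ZooKeeper client; the connection string and paths are placeholders for this test's quorum.

```java
import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);

    // Session-level watcher: also receives the SyncConnected event seen in the log.
    Watcher watcher = (WatchedEvent event) -> {
      System.out.println("event type=" + event.getType()
          + " state=" + event.getState() + " path=" + event.getPath());
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    };

    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, watcher);
    connected.await();

    // exists() registers a watch even when the znode is not there yet, which is what
    // "Set watcher on znode that does not yet exist" refers to above.
    zk.exists("/hbase/master", true);

    Thread.sleep(60_000); // keep the session open long enough to observe events
    zk.close();
  }
}
```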
2024-11-13T16:32:12,067 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T16:32:12,098 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f8f85b0bb1d8,45809,1731515527071 2024-11-13T16:32:12,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:12,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:12,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:12,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:12,207 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T16:32:12,249 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=f8f85b0bb1d8,45809,1731515527071 2024-11-13T16:32:12,284 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T16:32:12,331 INFO [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HRegionServer(746): ClusterId : 189d46b3-8410-4d36-80e0-1d6ce58cf995 2024-11-13T16:32:12,333 INFO [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HRegionServer(746): ClusterId : 189d46b3-8410-4d36-80e0-1d6ce58cf995 2024-11-13T16:32:12,335 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T16:32:12,337 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T16:32:12,342 INFO [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HRegionServer(746): ClusterId : 189d46b3-8410-4d36-80e0-1d6ce58cf995 2024-11-13T16:32:12,343 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T16:32:12,354 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-11-13T16:32:12,364 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-13T16:32:12,365 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 2024-11-13T16:32:12,371 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T16:32:12,371 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T16:32:12,373 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T16:32:12,373 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T16:32:12,376 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T16:32:12,376 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T16:32:12,392 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T16:32:12,393 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54a13472, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f8f85b0bb1d8/172.17.0.3:0 2024-11-13T16:32:12,405 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T16:32:12,406 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79094b8c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f8f85b0bb1d8/172.17.0.3:0 2024-11-13T16:32:12,410 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T16:32:12,411 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1839eb33, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=f8f85b0bb1d8/172.17.0.3:0 2024-11-13T16:32:12,422 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;f8f85b0bb1d8:38523 2024-11-13T16:32:12,432 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;f8f85b0bb1d8:39619 2024-11-13T16:32:12,451 INFO [RS:2;f8f85b0bb1d8:38523 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T16:32:12,451 INFO [RS:2;f8f85b0bb1d8:38523 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T16:32:12,452 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-13T16:32:12,452 INFO [RS:2;f8f85b0bb1d8:38523 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController 
loaded, priority=536870911. 2024-11-13T16:32:12,452 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-13T16:32:12,453 INFO [RS:1;f8f85b0bb1d8:39619 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T16:32:12,453 INFO [RS:1;f8f85b0bb1d8:39619 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T16:32:12,453 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-13T16:32:12,454 INFO [RS:1;f8f85b0bb1d8:39619 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T16:32:12,454 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-13T16:32:12,459 INFO [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HRegionServer(2659): reportForDuty to master=f8f85b0bb1d8,45809,1731515527071 with port=38523, startcode=1731515529292 2024-11-13T16:32:12,462 INFO [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HRegionServer(2659): reportForDuty to master=f8f85b0bb1d8,45809,1731515527071 with port=39619, startcode=1731515529105 2024-11-13T16:32:12,463 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;f8f85b0bb1d8:38437 2024-11-13T16:32:12,464 INFO [RS:0;f8f85b0bb1d8:38437 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T16:32:12,464 INFO [RS:0;f8f85b0bb1d8:38437 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T16:32:12,464 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-13T16:32:12,466 INFO [RS:0;f8f85b0bb1d8:38437 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T16:32:12,466 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HRegionServer(832): About to register with Master. 
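The CoprocessorHost lines show AccessController being loaded as a system coprocessor on the master and on every region server; in a normal deployment that is driven by configuration keys rather than test helpers. A hedged sketch of the usual keys (key names from memory; this test presumably wires them up through SecureTestUtil instead, as the MasterSyncObserver line suggests).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CoprocessorConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    String ac = "org.apache.hadoop.hbase.security.access.AccessController";

    // Load AccessController on the master, on every region, and on the region server,
    // mirroring the "System coprocessor ... loaded" lines above.
    conf.set("hbase.coprocessor.master.classes", ac);
    conf.set("hbase.coprocessor.region.classes", ac);
    conf.set("hbase.coprocessor.regionserver.classes", ac);

    System.out.println(conf.get("hbase.coprocessor.master.classes"));
  }
}
```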
2024-11-13T16:32:12,474 INFO [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HRegionServer(2659): reportForDuty to master=f8f85b0bb1d8,45809,1731515527071 with port=38437, startcode=1731515528789 2024-11-13T16:32:12,496 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T16:32:12,497 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T16:32:12,497 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T16:32:12,526 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T16:32:12,549 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T16:32:12,561 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T16:32:12,571 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34335, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T16:32:12,575 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42069, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T16:32:12,578 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38533, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T16:32:12,581 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45809 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-13T16:32:12,589 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: f8f85b0bb1d8,45809,1731515527071 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T16:32:12,598 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45809 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-13T16:32:12,600 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45809 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-13T16:32:12,625 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/f8f85b0bb1d8:0, corePoolSize=5, maxPoolSize=5 2024-11-13T16:32:12,625 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/f8f85b0bb1d8:0, corePoolSize=5, maxPoolSize=5 2024-11-13T16:32:12,625 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/f8f85b0bb1d8:0, corePoolSize=5, maxPoolSize=5 2024-11-13T16:32:12,626 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/f8f85b0bb1d8:0, corePoolSize=5, maxPoolSize=5 2024-11-13T16:32:12,626 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/f8f85b0bb1d8:0, corePoolSize=10, maxPoolSize=10 2024-11-13T16:32:12,626 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:12,629 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/f8f85b0bb1d8:0, corePoolSize=2, maxPoolSize=2 2024-11-13T16:32:12,630 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:12,631 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-13T16:32:12,632 WARN [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-13T16:32:12,632 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-13T16:32:12,632 WARN [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-13T16:32:12,635 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-13T16:32:12,635 WARN [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
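Editor's note: the entries above show the region servers retrying reportForDuty with a growing sleep (100 ms here, 200 ms a little later) while the master is still starting. The following is a minimal, illustrative Java sketch of that retry-with-backoff pattern only; it is not the real HRegionServer code, and reportForDuty() below is a hypothetical stand-in for the actual RPC.

    public class ReportForDutyRetrySketch {
        // Stand-in for the real RPC: pretend the master is never ready.
        static boolean reportForDuty() { return false; }

        public static void main(String[] args) throws InterruptedException {
            long sleepMs = 100;              // first retry interval seen in the log
            final long maxSleepMs = 60_000;  // assumed cap for this sketch
            for (int attempt = 0; attempt < 5 && !reportForDuty(); attempt++) {
                System.out.println("reportForDuty failed; sleeping " + sleepMs + " ms and then retrying.");
                Thread.sleep(sleepMs);
                sleepMs = Math.min(sleepMs * 2, maxSleepMs); // double the backoff, as the log suggests
            }
        }
    }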
2024-11-13T16:32:12,677 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T16:32:12,678 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T16:32:12,683 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731515562683 2024-11-13T16:32:12,686 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T16:32:12,688 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:32:12,689 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T16:32:12,696 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T16:32:12,706 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T16:32:12,707 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T16:32:12,707 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T16:32:12,708 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T16:32:12,709 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore 
name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:12,729 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T16:32:12,731 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T16:32:12,732 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T16:32:12,735 INFO [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HRegionServer(2659): reportForDuty to master=f8f85b0bb1d8,45809,1731515527071 with port=38523, startcode=1731515529292 2024-11-13T16:32:12,735 INFO [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HRegionServer(2659): reportForDuty to master=f8f85b0bb1d8,45809,1731515527071 with port=38437, startcode=1731515528789 2024-11-13T16:32:12,736 INFO [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HRegionServer(2659): reportForDuty to master=f8f85b0bb1d8,45809,1731515527071 with port=39619, startcode=1731515529105 2024-11-13T16:32:12,737 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45809 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-13T16:32:12,738 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45809 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-13T16:32:12,740 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-13T16:32:12,740 WARN [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 200 ms and then retrying. 2024-11-13T16:32:12,741 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45809 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-13T16:32:12,742 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T16:32:12,743 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-13T16:32:12,743 WARN [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 200 ms and then retrying. 2024-11-13T16:32:12,744 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T16:32:12,747 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-13T16:32:12,747 WARN [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 200 ms and then retrying. 2024-11-13T16:32:12,769 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/f8f85b0bb1d8:0:becomeActiveMaster-HFileCleaner.large.0-1731515532746,5,FailOnTimeoutGroup] 2024-11-13T16:32:12,773 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/f8f85b0bb1d8:0:becomeActiveMaster-HFileCleaner.small.0-1731515532769,5,FailOnTimeoutGroup] 2024-11-13T16:32:12,774 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
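Editor's note: the master has just initialized its log and HFile cleaner chores (LogsCleaner and HFileCleaner, both on a 600000 ms period). A minimal configuration sketch of how such cleaners are typically tuned follows; the property names are my assumption of the standard HBase keys and are not confirmed by this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // 600000 ms = the 10-minute LogsCleaner/HFileCleaner period seen above
            conf.setLong("hbase.master.cleaner.interval", 600_000L);   // assumed key
            conf.set("hbase.master.hfilecleaner.plugins",              // assumed key
                "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
            System.out.println(conf.get("hbase.master.cleaner.interval"));
        }
    }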
2024-11-13T16:32:12,774 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T16:32:12,776 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:12,777 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:12,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741831_1007 (size=1321) 2024-11-13T16:32:12,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741831_1007 (size=1321) 2024-11-13T16:32:12,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741831_1007 (size=1321) 2024-11-13T16:32:12,828 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T16:32:12,829 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:32:12,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741832_1008 (size=32) 2024-11-13T16:32:12,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741832_1008 (size=32) 
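Editor's note: the HMaster line above states that reopening regions with a very high storeFileRefCount is disabled until hbase.regions.recovery.store.file.ref.count is set above 0. A minimal sketch of enabling it via configuration; the property name is taken verbatim from the log line, and the value 3 is an arbitrary example, not a recommendation.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Threshold > 0 enables the feature mentioned in the INFO line above.
            conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
            System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
        }
    }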
2024-11-13T16:32:12,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741832_1008 (size=32) 2024-11-13T16:32:12,943 INFO [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HRegionServer(2659): reportForDuty to master=f8f85b0bb1d8,45809,1731515527071 with port=38437, startcode=1731515528789 2024-11-13T16:32:12,943 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:32:12,945 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45809 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:32:12,948 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45809 {}] master.ServerManager(517): Registering regionserver=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:32:12,954 INFO [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HRegionServer(2659): reportForDuty to master=f8f85b0bb1d8,45809,1731515527071 with port=38523, startcode=1731515529292 2024-11-13T16:32:12,956 INFO [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HRegionServer(2659): reportForDuty to master=f8f85b0bb1d8,45809,1731515527071 with port=39619, startcode=1731515529105 2024-11-13T16:32:12,970 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:32:12,970 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45809 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:32:12,970 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37215 2024-11-13T16:32:12,970 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T16:32:12,970 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45809 {}] master.ServerManager(517): Registering regionserver=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:32:12,975 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T16:32:12,979 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:32:12,979 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37215 2024-11-13T16:32:12,980 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T16:32:12,980 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45809 {}] master.ServerManager(363): Checking decommissioned status of RegionServer f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:32:12,982 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45809 {}] master.ServerManager(517): Registering regionserver=f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:32:12,983 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T16:32:12,983 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:32:12,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T16:32:12,990 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T16:32:12,991 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T16:32:12,992 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:32:12,992 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37215 2024-11-13T16:32:12,992 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T16:32:13,009 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T16:32:13,009 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:32:13,012 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] zookeeper.ZKUtil(111): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:32:13,013 WARN [RS:0;f8f85b0bb1d8:38437 {}] hbase.ZNodeClearer(65): Environment variable 
HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T16:32:13,013 INFO [RS:0;f8f85b0bb1d8:38437 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T16:32:13,013 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/WALs/f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:32:13,016 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T16:32:13,026 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T16:32:13,028 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] zookeeper.ZKUtil(111): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:32:13,028 WARN [RS:2;f8f85b0bb1d8:38523 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T16:32:13,028 INFO [RS:2;f8f85b0bb1d8:38523 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T16:32:13,029 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/WALs/f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:32:13,021 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f8f85b0bb1d8,38523,1731515529292] 2024-11-13T16:32:13,029 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f8f85b0bb1d8,38437,1731515528789] 2024-11-13T16:32:13,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T16:32:13,032 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] zookeeper.ZKUtil(111): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:32:13,032 WARN [RS:1;f8f85b0bb1d8:39619 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
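Editor's note: each region server above instantiates a WALProvider of type org.apache.hadoop.hbase.wal.AsyncFSWALProvider. A minimal sketch of selecting that provider explicitly; the key "hbase.wal.provider" and the value "asyncfs" are my understanding of the standard setting and are not stated in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.wal.provider", "asyncfs"); // assumed key/value for AsyncFSWALProvider
            System.out.println(conf.get("hbase.wal.provider"));
        }
    }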
2024-11-13T16:32:13,032 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T16:32:13,032 INFO [RS:1;f8f85b0bb1d8:39619 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T16:32:13,032 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:32:13,033 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/WALs/f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:32:13,036 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [f8f85b0bb1d8,39619,1731515529105] 2024-11-13T16:32:13,047 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T16:32:13,049 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T16:32:13,066 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T16:32:13,066 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:32:13,081 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T16:32:13,081 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T16:32:13,082 INFO [RS:2;f8f85b0bb1d8:38523 {}] 
regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T16:32:13,083 INFO [RS:0;f8f85b0bb1d8:38437 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T16:32:13,087 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740 2024-11-13T16:32:13,095 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740 2024-11-13T16:32:13,101 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T16:32:13,102 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T16:32:13,106 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T16:32:13,110 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T16:32:13,113 INFO [RS:1;f8f85b0bb1d8:39619 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T16:32:13,131 INFO [RS:2;f8f85b0bb1d8:38523 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T16:32:13,135 INFO [RS:0;f8f85b0bb1d8:38437 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T16:32:13,139 INFO [RS:1;f8f85b0bb1d8:39619 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T16:32:13,142 INFO [RS:0;f8f85b0bb1d8:38437 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T16:32:13,142 INFO [RS:0;f8f85b0bb1d8:38437 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:13,144 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:32:13,147 INFO [RS:1;f8f85b0bb1d8:39619 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T16:32:13,147 INFO [RS:1;f8f85b0bb1d8:39619 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
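Editor's note: the MemStoreFlusher lines above report globalMemStoreLimit=880 M with globalMemStoreLimitLowMark=836 M. A small arithmetic sketch showing that the low-water mark is about 95% of the limit; the 0.95 factor is my assumption of the default lower-limit fraction, inferred from the two numbers in the log.

    public class MemStoreLimitSketch {
        public static void main(String[] args) {
            long limitMb = 880;                    // globalMemStoreLimit from the log
            double lowerLimitFraction = 0.95;      // assumed default fraction
            long lowMarkMb = (long) (limitMb * lowerLimitFraction);
            System.out.println(lowMarkMb + " M");  // prints 836 M, matching the log
        }
    }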
2024-11-13T16:32:13,150 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71725746, jitterRate=0.06879690289497375}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T16:32:13,152 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731515532943Initializing all the Stores at 1731515532952 (+9 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731515532952Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731515532974 (+22 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515532974Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731515532974Cleaning up temporary data from old regions at 1731515533102 (+128 ms)Region opened successfully at 1731515533152 (+50 ms) 2024-11-13T16:32:13,153 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T16:32:13,153 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T16:32:13,153 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T16:32:13,153 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T16:32:13,153 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T16:32:13,172 INFO [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T16:32:13,175 INFO [RS:2;f8f85b0bb1d8:38523 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T16:32:13,175 INFO [RS:2;f8f85b0bb1d8:38523 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-13T16:32:13,183 INFO [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T16:32:13,195 INFO [RS:0;f8f85b0bb1d8:38437 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T16:32:13,197 INFO [RS:0;f8f85b0bb1d8:38437 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:13,198 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,198 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,198 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,199 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,199 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,200 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f8f85b0bb1d8:0, corePoolSize=2, maxPoolSize=2 2024-11-13T16:32:13,201 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,201 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,201 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,202 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,202 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,202 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,203 INFO [RS:1;f8f85b0bb1d8:39619 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T16:32:13,204 INFO [RS:1;f8f85b0bb1d8:39619 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
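Editor's note: the ExecutorService lines that follow start named pools with corePoolSize equal to maxPoolSize. A plain java.util.concurrent sketch of the same fixed-size pool pattern; the size and task are illustrative only and do not reflect the actual RS executors.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class FixedPoolSketch {
        public static void main(String[] args) {
            // corePoolSize = maxPoolSize = 3, like RS_SNAPSHOT_OPERATIONS below
            ExecutorService pool = Executors.newFixedThreadPool(3);
            pool.submit(() -> System.out.println("task running"));
            pool.shutdown();
        }
    }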
2024-11-13T16:32:13,207 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,207 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,207 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0, corePoolSize=3, maxPoolSize=3 2024-11-13T16:32:13,207 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,208 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f8f85b0bb1d8:0, corePoolSize=3, maxPoolSize=3 2024-11-13T16:32:13,208 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,208 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T16:32:13,208 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,208 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731515533153Disabling compacts and flushes for region at 1731515533153Disabling writes for close at 1731515533153Writing region close event to WAL at 1731515533207 (+54 ms)Closed at 1731515533208 (+1 ms) 2024-11-13T16:32:13,208 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f8f85b0bb1d8:0, corePoolSize=2, maxPoolSize=2 2024-11-13T16:32:13,208 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,208 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,208 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,208 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,209 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,209 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,209 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0, corePoolSize=3, maxPoolSize=3 2024-11-13T16:32:13,209 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] executor.ExecutorService(95): Starting 
executor service name=RS_FLUSH_OPERATIONS-regionserver/f8f85b0bb1d8:0, corePoolSize=3, maxPoolSize=3 2024-11-13T16:32:13,216 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T16:32:13,216 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T16:32:13,227 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T16:32:13,247 INFO [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T16:32:13,252 INFO [RS:2;f8f85b0bb1d8:38523 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T16:32:13,253 INFO [RS:2;f8f85b0bb1d8:38523 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:13,253 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,253 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,254 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,254 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,254 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,254 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/f8f85b0bb1d8:0, corePoolSize=2, maxPoolSize=2 2024-11-13T16:32:13,262 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T16:32:13,272 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,273 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,273 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,273 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,274 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/f8f85b0bb1d8:0, 
corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,274 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/f8f85b0bb1d8:0, corePoolSize=1, maxPoolSize=1 2024-11-13T16:32:13,274 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0, corePoolSize=3, maxPoolSize=3 2024-11-13T16:32:13,274 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/f8f85b0bb1d8:0, corePoolSize=3, maxPoolSize=3 2024-11-13T16:32:13,277 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T16:32:13,287 INFO [RS:1;f8f85b0bb1d8:39619 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:13,288 INFO [RS:1;f8f85b0bb1d8:39619 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:13,288 INFO [RS:1;f8f85b0bb1d8:39619 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:13,288 INFO [RS:1;f8f85b0bb1d8:39619 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:13,289 INFO [RS:1;f8f85b0bb1d8:39619 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:13,289 INFO [RS:1;f8f85b0bb1d8:39619 {}] hbase.ChoreService(168): Chore ScheduledChore name=f8f85b0bb1d8,39619,1731515529105-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T16:32:13,296 INFO [RS:0;f8f85b0bb1d8:38437 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:13,297 INFO [RS:0;f8f85b0bb1d8:38437 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:13,297 INFO [RS:0;f8f85b0bb1d8:38437 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:13,297 INFO [RS:0;f8f85b0bb1d8:38437 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:13,297 INFO [RS:0;f8f85b0bb1d8:38437 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:13,297 INFO [RS:0;f8f85b0bb1d8:38437 {}] hbase.ChoreService(168): Chore ScheduledChore name=f8f85b0bb1d8,38437,1731515528789-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T16:32:13,344 INFO [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T16:32:13,348 INFO [RS:0;f8f85b0bb1d8:38437 {}] hbase.ChoreService(168): Chore ScheduledChore name=f8f85b0bb1d8,38437,1731515528789-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
2024-11-13T16:32:13,349 INFO [RS:0;f8f85b0bb1d8:38437 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:13,349 INFO [RS:0;f8f85b0bb1d8:38437 {}] regionserver.Replication(171): f8f85b0bb1d8,38437,1731515528789 started 2024-11-13T16:32:13,379 INFO [RS:2;f8f85b0bb1d8:38523 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:13,379 INFO [RS:2;f8f85b0bb1d8:38523 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:13,379 INFO [RS:2;f8f85b0bb1d8:38523 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:13,379 INFO [RS:2;f8f85b0bb1d8:38523 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:13,380 INFO [RS:2;f8f85b0bb1d8:38523 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:13,380 INFO [RS:2;f8f85b0bb1d8:38523 {}] hbase.ChoreService(168): Chore ScheduledChore name=f8f85b0bb1d8,38523,1731515529292-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T16:32:13,403 INFO [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T16:32:13,403 INFO [RS:1;f8f85b0bb1d8:39619 {}] hbase.ChoreService(168): Chore ScheduledChore name=f8f85b0bb1d8,39619,1731515529105-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:13,404 INFO [RS:1;f8f85b0bb1d8:39619 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:13,405 INFO [RS:1;f8f85b0bb1d8:39619 {}] regionserver.Replication(171): f8f85b0bb1d8,39619,1731515529105 started 2024-11-13T16:32:13,428 INFO [RS:0;f8f85b0bb1d8:38437 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:13,431 INFO [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T16:32:13,431 WARN [f8f85b0bb1d8:45809 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-13T16:32:13,432 INFO [RS:2;f8f85b0bb1d8:38523 {}] hbase.ChoreService(168): Chore ScheduledChore name=f8f85b0bb1d8,38523,1731515529292-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:13,432 INFO [RS:2;f8f85b0bb1d8:38523 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
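Editor's note: the ChoreService lines above and below schedule recurring chores at fixed periods (for example ReplicationSourceStatistics every 300000 ms). A minimal java.util.concurrent analogy of fixed-period scheduling; the task body is a placeholder, not what the chore actually does.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChorePeriodSketch {
        public static void main(String[] args) {
            ScheduledExecutorService chorePool = Executors.newSingleThreadScheduledExecutor();
            chorePool.scheduleAtFixedRate(
                () -> System.out.println("collect replication source statistics"),
                0, 300_000, TimeUnit.MILLISECONDS); // 300000 ms = 5 minutes, as in the log
        }
    }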
2024-11-13T16:32:13,432 INFO [RS:2;f8f85b0bb1d8:38523 {}] regionserver.Replication(171): f8f85b0bb1d8,38523,1731515529292 started 2024-11-13T16:32:13,435 INFO [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HRegionServer(1482): Serving as f8f85b0bb1d8,38437,1731515528789, RpcServer on f8f85b0bb1d8/172.17.0.3:38437, sessionid=0x1013637f1800001 2024-11-13T16:32:13,436 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T16:32:13,436 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:32:13,442 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f8f85b0bb1d8,38437,1731515528789' 2024-11-13T16:32:13,443 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T16:32:13,444 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T16:32:13,453 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T16:32:13,459 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T16:32:13,464 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:32:13,465 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f8f85b0bb1d8,38437,1731515528789' 2024-11-13T16:32:13,465 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T16:32:13,471 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T16:32:13,472 INFO [RS:1;f8f85b0bb1d8:39619 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T16:32:13,473 INFO [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HRegionServer(1482): Serving as f8f85b0bb1d8,39619,1731515529105, RpcServer on f8f85b0bb1d8/172.17.0.3:39619, sessionid=0x1013637f1800002 2024-11-13T16:32:13,473 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T16:32:13,473 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:32:13,473 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f8f85b0bb1d8,39619,1731515529105' 2024-11-13T16:32:13,473 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T16:32:13,475 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T16:32:13,475 INFO [RS:0;f8f85b0bb1d8:38437 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T16:32:13,475 INFO [RS:0;f8f85b0bb1d8:38437 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T16:32:13,483 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T16:32:13,495 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T16:32:13,495 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T16:32:13,495 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:32:13,495 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f8f85b0bb1d8,39619,1731515529105' 2024-11-13T16:32:13,495 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T16:32:13,499 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T16:32:13,503 INFO [RS:2;f8f85b0bb1d8:38523 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:13,503 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T16:32:13,504 INFO [RS:1;f8f85b0bb1d8:39619 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T16:32:13,504 INFO [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HRegionServer(1482): Serving as f8f85b0bb1d8,38523,1731515529292, RpcServer on f8f85b0bb1d8/172.17.0.3:38523, sessionid=0x1013637f1800003 2024-11-13T16:32:13,504 INFO [RS:1;f8f85b0bb1d8:39619 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-13T16:32:13,504 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T16:32:13,504 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:32:13,504 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f8f85b0bb1d8,38523,1731515529292' 2024-11-13T16:32:13,504 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T16:32:13,515 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T16:32:13,521 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T16:32:13,522 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T16:32:13,522 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:32:13,522 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'f8f85b0bb1d8,38523,1731515529292' 2024-11-13T16:32:13,522 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T16:32:13,531 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T16:32:13,534 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T16:32:13,534 INFO [RS:2;f8f85b0bb1d8:38523 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T16:32:13,534 INFO [RS:2;f8f85b0bb1d8:38523 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-13T16:32:13,585 INFO [RS:0;f8f85b0bb1d8:38437 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-13T16:32:13,611 INFO [RS:1;f8f85b0bb1d8:39619 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-13T16:32:13,612 INFO [RS:0;f8f85b0bb1d8:38437 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f8f85b0bb1d8%2C38437%2C1731515528789, suffix=, logDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/WALs/f8f85b0bb1d8,38437,1731515528789, archiveDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/oldWALs, maxLogs=32 2024-11-13T16:32:13,635 INFO [RS:1;f8f85b0bb1d8:39619 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f8f85b0bb1d8%2C39619%2C1731515529105, suffix=, logDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/WALs/f8f85b0bb1d8,39619,1731515529105, archiveDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/oldWALs, maxLogs=32 2024-11-13T16:32:13,639 INFO [RS:2;f8f85b0bb1d8:38523 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-13T16:32:13,665 INFO [RS:2;f8f85b0bb1d8:38523 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f8f85b0bb1d8%2C38523%2C1731515529292, suffix=, logDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/WALs/f8f85b0bb1d8,38523,1731515529292, archiveDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/oldWALs, maxLogs=32 2024-11-13T16:32:13,676 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/WALs/f8f85b0bb1d8,39619,1731515529105/f8f85b0bb1d8%2C39619%2C1731515529105.1731515533637, exclude list is [], retry=0 2024-11-13T16:32:13,690 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/WALs/f8f85b0bb1d8,38437,1731515528789/f8f85b0bb1d8%2C38437%2C1731515528789.1731515533615, exclude list is [], retry=0 2024-11-13T16:32:13,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43257,DS-32a1c586-35ae-429b-b238-eeb62a943751,DISK] 2024-11-13T16:32:13,708 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33001,DS-ccd91292-b849-4472-a6b5-6c3406b0f09b,DISK] 2024-11-13T16:32:13,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46053,DS-b7f07e8f-e796-4039-9dba-04979c32c609,DISK] 2024-11-13T16:32:13,712 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:33001,DS-ccd91292-b849-4472-a6b5-6c3406b0f09b,DISK] 2024-11-13T16:32:13,713 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46053,DS-b7f07e8f-e796-4039-9dba-04979c32c609,DISK] 2024-11-13T16:32:13,713 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43257,DS-32a1c586-35ae-429b-b238-eeb62a943751,DISK] 2024-11-13T16:32:13,730 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/WALs/f8f85b0bb1d8,38523,1731515529292/f8f85b0bb1d8%2C38523%2C1731515529292.1731515533668, exclude list is [], retry=0 2024-11-13T16:32:13,816 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33001,DS-ccd91292-b849-4472-a6b5-6c3406b0f09b,DISK] 2024-11-13T16:32:13,817 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43257,DS-32a1c586-35ae-429b-b238-eeb62a943751,DISK] 2024-11-13T16:32:13,817 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46053,DS-b7f07e8f-e796-4039-9dba-04979c32c609,DISK] 2024-11-13T16:32:13,818 INFO [RS:1;f8f85b0bb1d8:39619 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/WALs/f8f85b0bb1d8,39619,1731515529105/f8f85b0bb1d8%2C39619%2C1731515529105.1731515533637 2024-11-13T16:32:13,831 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40831:40831),(127.0.0.1/127.0.0.1:36461:36461),(127.0.0.1/127.0.0.1:34817:34817)] 2024-11-13T16:32:13,851 INFO [RS:0;f8f85b0bb1d8:38437 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/WALs/f8f85b0bb1d8,38437,1731515528789/f8f85b0bb1d8%2C38437%2C1731515528789.1731515533615 2024-11-13T16:32:13,874 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36461:36461),(127.0.0.1/127.0.0.1:40831:40831),(127.0.0.1/127.0.0.1:34817:34817)] 2024-11-13T16:32:13,885 INFO [RS:2;f8f85b0bb1d8:38523 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/WALs/f8f85b0bb1d8,38523,1731515529292/f8f85b0bb1d8%2C38523%2C1731515529292.1731515533668 2024-11-13T16:32:13,890 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36461:36461),(127.0.0.1/127.0.0.1:40831:40831),(127.0.0.1/127.0.0.1:34817:34817)] 2024-11-13T16:32:13,936 DEBUG [f8f85b0bb1d8:45809 {}] assignment.AssignmentManager(2472): Processing 
assignQueue; systemServersCount=3, allServersCount=3 2024-11-13T16:32:13,961 DEBUG [f8f85b0bb1d8:45809 {}] balancer.BalancerClusterState(204): Hosts are {f8f85b0bb1d8=0} racks are {/default-rack=0} 2024-11-13T16:32:13,991 DEBUG [f8f85b0bb1d8:45809 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T16:32:13,991 DEBUG [f8f85b0bb1d8:45809 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T16:32:13,991 DEBUG [f8f85b0bb1d8:45809 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T16:32:13,991 DEBUG [f8f85b0bb1d8:45809 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T16:32:13,991 DEBUG [f8f85b0bb1d8:45809 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T16:32:13,992 DEBUG [f8f85b0bb1d8:45809 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T16:32:13,992 INFO [f8f85b0bb1d8:45809 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T16:32:13,992 INFO [f8f85b0bb1d8:45809 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T16:32:13,992 INFO [f8f85b0bb1d8:45809 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T16:32:13,992 DEBUG [f8f85b0bb1d8:45809 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T16:32:14,020 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:32:14,030 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f8f85b0bb1d8,39619,1731515529105, state=OPENING 2024-11-13T16:32:14,130 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T16:32:14,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:14,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:14,381 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T16:32:14,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:14,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:14,384 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T16:32:14,386 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T16:32:14,386 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T16:32:14,389 DEBUG 
[PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T16:32:14,411 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=f8f85b0bb1d8,39619,1731515529105}] 2024-11-13T16:32:14,669 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T16:32:14,692 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54855, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T16:32:14,771 INFO [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T16:32:14,772 INFO [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-13T16:32:14,773 INFO [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-13T16:32:14,792 INFO [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=f8f85b0bb1d8%2C39619%2C1731515529105.meta, suffix=.meta, logDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/WALs/f8f85b0bb1d8,39619,1731515529105, archiveDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/oldWALs, maxLogs=32 2024-11-13T16:32:14,840 DEBUG [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/WALs/f8f85b0bb1d8,39619,1731515529105/f8f85b0bb1d8%2C39619%2C1731515529105.meta.1731515534795.meta, exclude list is [], retry=0 2024-11-13T16:32:14,863 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33001,DS-ccd91292-b849-4472-a6b5-6c3406b0f09b,DISK] 2024-11-13T16:32:14,866 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43257,DS-32a1c586-35ae-429b-b238-eeb62a943751,DISK] 2024-11-13T16:32:14,867 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46053,DS-b7f07e8f-e796-4039-9dba-04979c32c609,DISK] 2024-11-13T16:32:14,932 INFO [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/WALs/f8f85b0bb1d8,39619,1731515529105/f8f85b0bb1d8%2C39619%2C1731515529105.meta.1731515534795.meta 2024-11-13T16:32:14,955 DEBUG [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36461:36461),(127.0.0.1/127.0.0.1:34817:34817),(127.0.0.1/127.0.0.1:40831:40831)] 2024-11-13T16:32:14,955 DEBUG [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T16:32:14,957 DEBUG [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-11-13T16:32:14,961 INFO [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T16:32:14,963 DEBUG [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T16:32:14,965 DEBUG [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T16:32:14,970 INFO [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-13T16:32:14,983 DEBUG [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T16:32:14,984 DEBUG [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:32:14,985 DEBUG [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T16:32:14,985 DEBUG [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T16:32:15,001 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T16:32:15,005 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T16:32:15,006 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:32:15,008 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T16:32:15,008 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T16:32:15,011 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T16:32:15,011 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:32:15,015 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T16:32:15,015 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T16:32:15,021 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T16:32:15,021 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:32:15,023 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T16:32:15,023 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T16:32:15,040 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T16:32:15,040 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:32:15,045 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-13T16:32:15,045 DEBUG [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T16:32:15,052 DEBUG [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740 2024-11-13T16:32:15,083 DEBUG [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740 2024-11-13T16:32:15,103 DEBUG [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T16:32:15,103 DEBUG [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T16:32:15,106 DEBUG [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T16:32:15,115 DEBUG [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T16:32:15,117 INFO [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72686216, jitterRate=0.08310902118682861}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T16:32:15,118 DEBUG [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T16:32:15,123 DEBUG [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731515534986Writing region info on filesystem at 1731515534986Initializing all the Stores at 1731515534999 (+13 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731515535000 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731515535001 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515535001Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731515535001Cleaning up temporary data from old regions at 1731515535103 (+102 ms)Running coprocessor post-open hooks at 1731515535118 (+15 ms)Region opened successfully at 1731515535122 (+4 ms) 2024-11-13T16:32:15,145 INFO [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731515534627 2024-11-13T16:32:15,166 DEBUG [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T16:32:15,167 INFO [RS_OPEN_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T16:32:15,169 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:32:15,179 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as f8f85b0bb1d8,39619,1731515529105, state=OPEN 2024-11-13T16:32:15,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T16:32:15,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T16:32:15,235 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T16:32:15,235 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T16:32:15,236 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:32:15,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T16:32:15,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T16:32:15,239 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T16:32:15,239 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T16:32:15,287 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T16:32:15,288 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=f8f85b0bb1d8,39619,1731515529105 in 842 msec 2024-11-13T16:32:15,324 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T16:32:15,324 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 2.0650 sec 2024-11-13T16:32:15,331 DEBUG [PEWorker-5 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T16:32:15,331 INFO [PEWorker-5 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T16:32:15,394 DEBUG [PEWorker-5 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:32:15,395 DEBUG [PEWorker-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:32:15,433 DEBUG [PEWorker-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:32:15,448 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44751, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:32:15,543 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 3.1410 sec 2024-11-13T16:32:15,544 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731515535543, completionTime=-1 2024-11-13T16:32:15,547 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-13T16:32:15,548 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-11-13T16:32:15,602 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-11-13T16:32:15,602 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731515595602 2024-11-13T16:32:15,602 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731515655602 2024-11-13T16:32:15,602 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 54 msec 2024-11-13T16:32:15,610 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-13T16:32:15,645 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f8f85b0bb1d8,45809,1731515527071-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:15,650 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f8f85b0bb1d8,45809,1731515527071-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:15,650 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f8f85b0bb1d8,45809,1731515527071-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:15,654 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-f8f85b0bb1d8:45809, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:15,661 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:15,675 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:15,677 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T16:32:15,744 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 6.229sec 2024-11-13T16:32:15,750 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T16:32:15,752 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T16:32:15,755 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T16:32:15,756 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-13T16:32:15,756 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T16:32:15,757 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f8f85b0bb1d8,45809,1731515527071-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T16:32:15,758 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f8f85b0bb1d8,45809,1731515527071-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T16:32:15,888 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4994d2d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:32:15,926 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-13T16:32:15,926 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-13T16:32:15,929 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-13T16:32:15,930 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is f8f85b0bb1d8,45809,1731515527071 2024-11-13T16:32:15,934 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@40dce742 2024-11-13T16:32:15,936 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-13T16:32:15,944 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54529, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-13T16:32:15,955 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:32:15,959 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:32:15,961 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45809 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T16:32:16,027 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-11-13T16:32:16,057 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:32:16,063 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T16:32:16,069 DEBUG [PEWorker-4 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:32:16,071 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:32:16,071 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45809 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-11-13T16:32:16,071 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:32:16,074 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36e1be08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:32:16,074 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:32:16,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T16:32:16,081 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T16:32:16,087 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:32:16,108 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:32:16,113 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45204, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:32:16,123 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@685fa12c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:32:16,124 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:32:16,142 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:32:16,148 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:32:16,192 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49288, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:32:16,196 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=f8f85b0bb1d8,45809,1731515527071 2024-11-13T16:32:16,197 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 
2024-11-13T16:32:16,197 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/test.cache.data in system properties and HBase conf 2024-11-13T16:32:16,197 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T16:32:16,197 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop.log.dir in system properties and HBase conf 2024-11-13T16:32:16,197 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T16:32:16,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T16:32:16,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T16:32:16,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T16:32:16,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T16:32:16,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T16:32:16,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T16:32:16,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T16:32:16,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T16:32:16,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T16:32:16,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T16:32:16,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T16:32:16,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/nfs.dump.dir in system properties and HBase conf 2024-11-13T16:32:16,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/java.io.tmpdir in system properties and HBase conf 2024-11-13T16:32:16,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T16:32:16,200 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T16:32:16,200 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T16:32:16,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741837_1013 (size=349) 2024-11-13T16:32:16,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741837_1013 (size=349) 2024-11-13T16:32:16,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741837_1013 (size=349) 2024-11-13T16:32:16,229 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 618c30cdb779cfeb9cea6e5be1e51395, NAME => 'hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395.', STARTKEY => '', ENDKEY => ''}, 
tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:32:16,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T16:32:16,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741838_1014 (size=36) 2024-11-13T16:32:16,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741838_1014 (size=36) 2024-11-13T16:32:16,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741838_1014 (size=36) 2024-11-13T16:32:16,386 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:32:16,387 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing 618c30cdb779cfeb9cea6e5be1e51395, disabling compactions & flushes 2024-11-13T16:32:16,387 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. 2024-11-13T16:32:16,387 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. 2024-11-13T16:32:16,387 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. after waiting 0 ms 2024-11-13T16:32:16,387 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. 2024-11-13T16:32:16,387 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. 
2024-11-13T16:32:16,387 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 618c30cdb779cfeb9cea6e5be1e51395: Waiting for close lock at 1731515536386Disabling compacts and flushes for region at 1731515536386Disabling writes for close at 1731515536387 (+1 ms)Writing region close event to WAL at 1731515536387Closed at 1731515536387 2024-11-13T16:32:16,400 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T16:32:16,408 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1731515536401"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515536401"}]},"ts":"1731515536401"} 2024-11-13T16:32:16,422 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-13T16:32:16,428 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T16:32:16,436 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515536428"}]},"ts":"1731515536428"} 2024-11-13T16:32:16,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T16:32:16,453 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-11-13T16:32:16,454 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {f8f85b0bb1d8=0} racks are {/default-rack=0} 2024-11-13T16:32:16,459 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T16:32:16,460 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T16:32:16,460 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T16:32:16,460 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T16:32:16,460 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T16:32:16,460 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T16:32:16,460 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T16:32:16,460 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T16:32:16,460 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T16:32:16,460 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T16:32:16,466 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=618c30cdb779cfeb9cea6e5be1e51395, ASSIGN}] 2024-11-13T16:32:16,471 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, 
region=618c30cdb779cfeb9cea6e5be1e51395, ASSIGN 2024-11-13T16:32:16,475 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=618c30cdb779cfeb9cea6e5be1e51395, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,38523,1731515529292; forceNewPlan=false, retain=false 2024-11-13T16:32:16,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741839_1015 (size=592039) 2024-11-13T16:32:16,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741839_1015 (size=592039) 2024-11-13T16:32:16,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741839_1015 (size=592039) 2024-11-13T16:32:16,632 INFO [f8f85b0bb1d8:45809 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-13T16:32:16,634 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=618c30cdb779cfeb9cea6e5be1e51395, regionState=OPENING, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:32:16,646 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=618c30cdb779cfeb9cea6e5be1e51395, ASSIGN because future has completed 2024-11-13T16:32:16,663 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 618c30cdb779cfeb9cea6e5be1e51395, server=f8f85b0bb1d8,38523,1731515529292}] 2024-11-13T16:32:16,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741840_1016 (size=1663647) 2024-11-13T16:32:16,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741840_1016 (size=1663647) 2024-11-13T16:32:16,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741840_1016 (size=1663647) 2024-11-13T16:32:16,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T16:32:16,833 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T16:32:16,851 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60021, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T16:32:16,903 INFO [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. 
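
[editor's note] The assignment records above end with the master choosing f8f85b0bb1d8,38523 to open the acl region. For orientation only, a minimal client-side sketch (assuming a reachable cluster and the stock HBase 2.x+ connection API, with hbase-site.xml on the classpath) of listing where a table's regions ended up might look like this; the class name and output format are illustrative.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionLocationSketch {
        public static void main(String[] args) throws IOException {
            Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 RegionLocator locator = conn.getRegionLocator(TableName.valueOf("hbase", "acl"))) {
                // Each location pairs an encoded region name with the server it is assigned to,
                // i.e. the placement the TransitRegionStateProcedure entries above converge on.
                for (HRegionLocation loc : locator.getAllRegionLocations()) {
                    System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
                }
            }
        }
    }
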
2024-11-13T16:32:16,903 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 618c30cdb779cfeb9cea6e5be1e51395, NAME => 'hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395.', STARTKEY => '', ENDKEY => ''} 2024-11-13T16:32:16,904 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. service=AccessControlService 2024-11-13T16:32:16,904 INFO [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T16:32:16,911 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:32:16,911 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:32:16,915 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:32:16,915 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:32:16,938 INFO [StoreOpener-618c30cdb779cfeb9cea6e5be1e51395-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:32:16,974 INFO [StoreOpener-618c30cdb779cfeb9cea6e5be1e51395-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 618c30cdb779cfeb9cea6e5be1e51395 columnFamilyName l 2024-11-13T16:32:16,974 DEBUG [StoreOpener-618c30cdb779cfeb9cea6e5be1e51395-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:32:16,990 INFO [StoreOpener-618c30cdb779cfeb9cea6e5be1e51395-1 {}] regionserver.HStore(327): Store=618c30cdb779cfeb9cea6e5be1e51395/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:32:16,993 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:32:17,002 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/acl/618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:32:17,011 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/acl/618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:32:17,057 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:32:17,058 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:32:17,071 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:32:17,109 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/acl/618c30cdb779cfeb9cea6e5be1e51395/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:32:17,111 INFO [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened 618c30cdb779cfeb9cea6e5be1e51395; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58995301, jitterRate=-0.12090151011943817}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:32:17,111 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:32:17,126 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 618c30cdb779cfeb9cea6e5be1e51395: Running coprocessor pre-open hook at 1731515536915Writing region info on filesystem at 1731515536915Initializing all the Stores at 1731515536924 (+9 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731515536924Cleaning up temporary data from old regions at 1731515537058 (+134 ms)Running coprocessor post-open hooks at 1731515537111 (+53 ms)Region opened successfully at 1731515537124 (+13 ms) 
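
[editor's note] The open record above reports the region's split policy chain (SteppingSplitPolicy over IncreasingToUpperBoundRegionSplitPolicy over ConstantSizeRegionSplitPolicy) and a jittered max file size. As a hedged aside, if a test or operator wanted to pin these per table rather than inherit the hbase.regionserver.region.split.policy / hbase.hregion.max.filesize defaults, a sketch along these lines would do it; the table name and sizes are invented for the example.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SplitPolicySketch {
        public static TableDescriptor build() {
            // Table name and sizes here are illustrative, not taken from the test.
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f"))
                // Pin the split policy and region max file size per table instead of
                // relying on the cluster-wide defaults mentioned above.
                .setRegionSplitPolicyClassName(
                    "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy")
                .setMaxFileSize(256L * 1024 * 1024)
                .build();
        }
    }
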
2024-11-13T16:32:17,148 INFO [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., pid=6, masterSystemTime=1731515536832 2024-11-13T16:32:17,194 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. 2024-11-13T16:32:17,195 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=618c30cdb779cfeb9cea6e5be1e51395, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:32:17,195 INFO [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. 2024-11-13T16:32:17,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 618c30cdb779cfeb9cea6e5be1e51395, server=f8f85b0bb1d8,38523,1731515529292 because future has completed 2024-11-13T16:32:17,239 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-13T16:32:17,246 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 618c30cdb779cfeb9cea6e5be1e51395, server=f8f85b0bb1d8,38523,1731515529292 in 561 msec 2024-11-13T16:32:17,260 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-13T16:32:17,261 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=618c30cdb779cfeb9cea6e5be1e51395, ASSIGN in 774 msec 2024-11-13T16:32:17,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T16:32:17,276 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T16:32:17,277 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515537277"}]},"ts":"1731515537277"} 2024-11-13T16:32:17,286 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-11-13T16:32:17,300 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T16:32:17,336 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 1.3400 sec 2024-11-13T16:32:17,718 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:32:17,719 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-11-13T16:32:17,728 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T16:32:17,728 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-13T16:32:17,763 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-11-13T16:32:17,763 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-11-13T16:32:17,767 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:32:17,767 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-11-13T16:32:17,768 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-13T16:32:17,768 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-11-13T16:32:17,769 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:32:17,769 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-11-13T16:32:17,771 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T16:32:17,772 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-13T16:32:17,773 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-13T16:32:17,774 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-13T16:32:18,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T16:32:18,292 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: hbase:acl completed 2024-11-13T16:32:18,331 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T16:32:18,333 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T16:32:18,333 INFO [master/f8f85b0bb1d8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=f8f85b0bb1d8,45809,1731515527071-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T16:32:19,071 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-11-13T16:32:19,109 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-13T16:32:20,591 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-nodemanager.properties,hadoop-metrics2.properties 2024-11-13T16:32:22,094 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T16:32:22,678 WARN [Thread-385 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T16:32:23,224 INFO [Thread-385 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T16:32:23,224 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-13T16:32:23,226 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T16:32:23,330 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T16:32:23,330 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T16:32:23,330 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T16:32:23,337 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T16:32:23,343 INFO [Thread-385 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T16:32:23,343 INFO [Thread-385 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T16:32:23,343 INFO [Thread-385 {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T16:32:23,355 INFO [Thread-385 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57fd5d16{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop.log.dir/,AVAILABLE} 2024-11-13T16:32:23,356 INFO [Thread-385 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e876d54{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-13T16:32:23,400 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4bb896b5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop.log.dir/,AVAILABLE} 2024-11-13T16:32:23,411 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b9969de{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-13T16:32:23,743 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-11-13T16:32:23,744 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-11-13T16:32:23,744 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-13T16:32:23,751 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-13T16:32:23,896 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-13T16:32:24,795 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-13T16:32:25,622 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-13T16:32:25,668 INFO [Thread-385 {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@37e897fc{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/java.io.tmpdir/jetty-localhost-46547-hadoop-yarn-common-3_4_1_jar-_-any-10549517307767169420/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-13T16:32:25,669 INFO [Thread-385 {}] server.AbstractConnector(333): Started ServerConnector@25f3d501{HTTP/1.1, (http/1.1)}{localhost:46547} 2024-11-13T16:32:25,669 INFO [Thread-385 {}] server.Server(415): Started @32278ms 2024-11-13T16:32:25,675 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1e339e34{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/java.io.tmpdir/jetty-localhost-46237-hadoop-yarn-common-3_4_1_jar-_-any-1371576575316348060/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-13T16:32:25,680 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@251a1092{HTTP/1.1, (http/1.1)}{localhost:46237} 2024-11-13T16:32:25,680 INFO [Time-limited test {}] server.Server(415): Started @32289ms 2024-11-13T16:32:26,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741841_1017 (size=5) 2024-11-13T16:32:26,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741841_1017 (size=5) 2024-11-13T16:32:26,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741841_1017 (size=5) 2024-11-13T16:32:29,806 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-11-13T16:32:29,814 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T16:32:29,920 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-13T16:32:29,922 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T16:32:30,024 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T16:32:30,024 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T16:32:30,024 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T16:32:30,045 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T16:32:30,047 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43189f04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop.log.dir/,AVAILABLE} 2024-11-13T16:32:30,048 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1011848b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-13T16:32:30,139 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-13T16:32:30,139 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-13T16:32:30,139 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-13T16:32:30,139 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-13T16:32:30,167 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-13T16:32:30,219 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-13T16:32:30,452 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-13T16:32:30,482 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7729f16{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/java.io.tmpdir/jetty-localhost-37639-hadoop-yarn-common-3_4_1_jar-_-any-17815405795304488470/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-13T16:32:30,484 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6f1603c7{HTTP/1.1, (http/1.1)}{localhost:37639} 2024-11-13T16:32:30,484 INFO [Time-limited test {}] server.Server(415): Started @37093ms 2024-11-13T16:32:31,347 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-11-13T16:32:31,354 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T16:32:31,442 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. 
This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-13T16:32:31,446 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T16:32:31,487 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T16:32:31,487 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T16:32:31,491 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T16:32:31,508 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T16:32:31,511 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24dfe4c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop.log.dir/,AVAILABLE} 2024-11-13T16:32:31,512 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b9940e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-13T16:32:31,674 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-13T16:32:31,674 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-13T16:32:31,674 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-13T16:32:31,674 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-13T16:32:31,688 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-13T16:32:31,697 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-13T16:32:32,050 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-13T16:32:32,074 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@3747195e{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/java.io.tmpdir/jetty-localhost-38339-hadoop-yarn-common-3_4_1_jar-_-any-4025707366003401797/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-13T16:32:32,091 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6aa7bca0{HTTP/1.1, (http/1.1)}{localhost:38339} 2024-11-13T16:32:32,091 INFO [Time-limited test {}] server.Server(415): Started @38700ms 2024-11-13T16:32:32,177 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-11-13T16:32:32,180 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T16:32:32,274 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=715, OpenFileDescriptor=769, MaxFileDescriptor=1048576, SystemLoadAverage=1381, ProcessCount=13, AvailableMemoryMB=4518 2024-11-13T16:32:32,277 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=715 is superior to 500 2024-11-13T16:32:32,285 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-13T16:32:32,293 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is f8f85b0bb1d8,45809,1731515527071 2024-11-13T16:32:32,293 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@22eefbca 2024-11-13T16:32:32,294 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-13T16:32:32,306 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59048, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-13T16:32:32,308 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T16:32:32,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-11-13T16:32:32,330 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T16:32:32,343 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: 
"default" qualifier: "testtb-testExportWithTargetName" procId is: 7 2024-11-13T16:32:32,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-13T16:32:32,357 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T16:32:32,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-13T16:32:32,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741842_1018 (size=442) 2024-11-13T16:32:32,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741842_1018 (size=442) 2024-11-13T16:32:32,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741842_1018 (size=442) 2024-11-13T16:32:32,522 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => cd566cdf4cddf6c6aa17b382a0748fbc, NAME => 'testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:32:32,528 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => b244849f410331d24fc6fa8bfa1098bf, NAME => 'testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:32:32,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741843_1019 (size=67) 2024-11-13T16:32:32,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741843_1019 (size=67) 2024-11-13T16:32:32,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741843_1019 (size=67) 2024-11-13T16:32:32,683 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-13T16:32:32,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741844_1020 (size=67) 2024-11-13T16:32:32,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741844_1020 (size=67) 2024-11-13T16:32:32,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741844_1020 (size=67) 2024-11-13T16:32:32,721 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:32:32,721 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing b244849f410331d24fc6fa8bfa1098bf, disabling compactions & flushes 2024-11-13T16:32:32,721 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf. 2024-11-13T16:32:32,721 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf. 2024-11-13T16:32:32,721 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf. after waiting 0 ms 2024-11-13T16:32:32,721 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf. 2024-11-13T16:32:32,722 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf. 
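
[editor's note] The create request logged above (IS_MOB => 'true', MOB_THRESHOLD => '0', regions split at '1') produces the two regions being initialized here. A rough, non-authoritative sketch of issuing an equivalent create through the Java Admin API, assuming the cluster's hbase-site.xml is on the classpath, could look like the following.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobTableCreateSketch {
        public static void main(String[] args) throws IOException {
            Configuration conf = HBaseConfiguration.create(); // assumes a reachable cluster config
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // MOB_THRESHOLD => '0' means every cell in 'cf' is treated as a MOB cell.
                ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("cf"))
                    .setMobEnabled(true)
                    .setMobThreshold(0L)
                    .setMaxVersions(1)
                    .setBloomFilterType(BloomType.ROW)
                    .build();
                TableDescriptor table = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
                    .setColumnFamily(cf)
                    .build();
                // One split key ("1") yields the two regions seen above: ['', '1') and ['1', '').
                admin.createTable(table, new byte[][] { Bytes.toBytes("1") });
            }
        }
    }
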
2024-11-13T16:32:32,726 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for b244849f410331d24fc6fa8bfa1098bf: Waiting for close lock at 1731515552721Disabling compacts and flushes for region at 1731515552721Disabling writes for close at 1731515552721Writing region close event to WAL at 1731515552721Closed at 1731515552722 (+1 ms) 2024-11-13T16:32:33,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-13T16:32:33,030 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:32:33,030 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing cd566cdf4cddf6c6aa17b382a0748fbc, disabling compactions & flushes 2024-11-13T16:32:33,030 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc. 2024-11-13T16:32:33,031 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc. 2024-11-13T16:32:33,031 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc. after waiting 0 ms 2024-11-13T16:32:33,031 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc. 2024-11-13T16:32:33,031 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc. 
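
[editor's note] The interleaved "Checking to see if procedure is done pid=7" lines are the client polling the master while CreateTableProcedure runs. As a loose illustration, not the test's actual code, a caller that wants to block until the new table is serving could poll availability like this; the sleep interval is arbitrary.

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class WaitForTableSketch {
        public static void waitUntilAvailable(Connection conn, TableName table)
                throws IOException, InterruptedException {
            try (Admin admin = conn.getAdmin()) {
                // The master-side procedure (pid=7 above) runs asynchronously; a client can
                // poll availability, much like the repeated "is procedure done" RPCs in this log.
                while (!admin.isTableAvailable(table)) {
                    Thread.sleep(200L);
                }
            }
        }

        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
                waitUntilAvailable(conn, TableName.valueOf("testtb-testExportWithTargetName"));
            }
        }
    }
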
2024-11-13T16:32:33,031 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for cd566cdf4cddf6c6aa17b382a0748fbc: Waiting for close lock at 1731515553030Disabling compacts and flushes for region at 1731515553030Disabling writes for close at 1731515553031 (+1 ms)Writing region close event to WAL at 1731515553031Closed at 1731515553031 2024-11-13T16:32:33,042 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T16:32:33,043 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1731515553042"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515553042"}]},"ts":"1731515553042"} 2024-11-13T16:32:33,043 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1731515553042"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515553042"}]},"ts":"1731515553042"} 2024-11-13T16:32:33,177 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-13T16:32:33,181 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T16:32:33,182 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515553181"}]},"ts":"1731515553181"} 2024-11-13T16:32:33,215 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-11-13T16:32:33,216 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {f8f85b0bb1d8=0} racks are {/default-rack=0} 2024-11-13T16:32:33,222 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T16:32:33,222 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T16:32:33,222 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T16:32:33,222 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T16:32:33,223 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T16:32:33,223 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T16:32:33,223 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T16:32:33,223 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T16:32:33,223 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T16:32:33,223 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T16:32:33,224 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=cd566cdf4cddf6c6aa17b382a0748fbc, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b244849f410331d24fc6fa8bfa1098bf, ASSIGN}] 2024-11-13T16:32:33,236 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=cd566cdf4cddf6c6aa17b382a0748fbc, ASSIGN 2024-11-13T16:32:33,239 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b244849f410331d24fc6fa8bfa1098bf, ASSIGN 2024-11-13T16:32:33,242 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=cd566cdf4cddf6c6aa17b382a0748fbc, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,38523,1731515529292; forceNewPlan=false, retain=false 2024-11-13T16:32:33,251 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b244849f410331d24fc6fa8bfa1098bf, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,39619,1731515529105; forceNewPlan=false, retain=false 2024-11-13T16:32:33,399 INFO [f8f85b0bb1d8:45809 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-13T16:32:33,399 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=b244849f410331d24fc6fa8bfa1098bf, regionState=OPENING, regionLocation=f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:32:33,400 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=cd566cdf4cddf6c6aa17b382a0748fbc, regionState=OPENING, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:32:33,414 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b244849f410331d24fc6fa8bfa1098bf, ASSIGN because future has completed 2024-11-13T16:32:33,430 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure b244849f410331d24fc6fa8bfa1098bf, server=f8f85b0bb1d8,39619,1731515529105}] 2024-11-13T16:32:33,435 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=cd566cdf4cddf6c6aa17b382a0748fbc, ASSIGN because future has completed 2024-11-13T16:32:33,442 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure cd566cdf4cddf6c6aa17b382a0748fbc, server=f8f85b0bb1d8,38523,1731515529292}] 2024-11-13T16:32:33,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-13T16:32:33,659 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc. 2024-11-13T16:32:33,660 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => cd566cdf4cddf6c6aa17b382a0748fbc, NAME => 'testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc.', STARTKEY => '', ENDKEY => '1'} 2024-11-13T16:32:33,660 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc. service=AccessControlService 2024-11-13T16:32:33,661 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
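
[editor's note] The open handlers above register the AccessControlService coprocessor endpoint on each region because the test cluster loads AccessController system-wide. A hedged sketch of the configuration keys usually involved is below; the exact wiring in this test (done via SecureTestUtil) may differ, and the property values simply name the class already visible in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class AccessControllerConfigSketch {
        public static Configuration build() {
            Configuration conf = HBaseConfiguration.create();
            // Loading AccessController on master, regionserver and region hooks is what
            // produces the "Registered coprocessor service ... AccessControlService" lines
            // and the Coprocessor.* metrics registrations seen earlier in this log.
            conf.set("hbase.security.authorization", "true");
            conf.set("hbase.coprocessor.master.classes",
                "org.apache.hadoop.hbase.security.access.AccessController");
            conf.set("hbase.coprocessor.regionserver.classes",
                "org.apache.hadoop.hbase.security.access.AccessController");
            conf.set("hbase.coprocessor.region.classes",
                "org.apache.hadoop.hbase.security.access.AccessController");
            return conf;
        }
    }
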
2024-11-13T16:32:33,662 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName cd566cdf4cddf6c6aa17b382a0748fbc 2024-11-13T16:32:33,662 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:32:33,662 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for cd566cdf4cddf6c6aa17b382a0748fbc 2024-11-13T16:32:33,663 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for cd566cdf4cddf6c6aa17b382a0748fbc 2024-11-13T16:32:33,700 INFO [StoreOpener-cd566cdf4cddf6c6aa17b382a0748fbc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region cd566cdf4cddf6c6aa17b382a0748fbc 2024-11-13T16:32:33,721 INFO [StoreOpener-cd566cdf4cddf6c6aa17b382a0748fbc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cd566cdf4cddf6c6aa17b382a0748fbc columnFamilyName cf 2024-11-13T16:32:33,728 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf. 2024-11-13T16:32:33,729 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => b244849f410331d24fc6fa8bfa1098bf, NAME => 'testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf.', STARTKEY => '1', ENDKEY => ''} 2024-11-13T16:32:33,730 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf. service=AccessControlService 2024-11-13T16:32:33,743 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
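
[editor's note] The CompactionConfiguration line above echoes the effective compaction knobs for family 'cf' (minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, minCompactSize 128 MB). If one wanted to override these per column family rather than cluster-wide, a sketch of the per-family configuration hook follows; the values simply restate the logged defaults and are not a recommendation.

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactionTuningSketch {
        public static ColumnFamilyDescriptor build() {
            // Per-family overrides of the knobs echoed by CompactionConfiguration above.
            return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setConfiguration("hbase.hstore.compaction.min", "3")
                .setConfiguration("hbase.hstore.compaction.max", "10")
                .setConfiguration("hbase.hstore.compaction.ratio", "1.2")
                .setConfiguration("hbase.hstore.compaction.min.size",
                    String.valueOf(128L * 1024 * 1024))
                .build();
        }
    }
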
2024-11-13T16:32:33,743 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName b244849f410331d24fc6fa8bfa1098bf 2024-11-13T16:32:33,743 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:32:33,744 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for b244849f410331d24fc6fa8bfa1098bf 2024-11-13T16:32:33,744 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for b244849f410331d24fc6fa8bfa1098bf 2024-11-13T16:32:33,748 DEBUG [StoreOpener-cd566cdf4cddf6c6aa17b382a0748fbc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:32:33,751 INFO [StoreOpener-cd566cdf4cddf6c6aa17b382a0748fbc-1 {}] regionserver.HStore(327): Store=cd566cdf4cddf6c6aa17b382a0748fbc/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:32:33,759 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for cd566cdf4cddf6c6aa17b382a0748fbc 2024-11-13T16:32:33,761 INFO [StoreOpener-b244849f410331d24fc6fa8bfa1098bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b244849f410331d24fc6fa8bfa1098bf 2024-11-13T16:32:33,763 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/cd566cdf4cddf6c6aa17b382a0748fbc 2024-11-13T16:32:33,769 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/cd566cdf4cddf6c6aa17b382a0748fbc 2024-11-13T16:32:33,771 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for cd566cdf4cddf6c6aa17b382a0748fbc 2024-11-13T16:32:33,771 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for cd566cdf4cddf6c6aa17b382a0748fbc 2024-11-13T16:32:33,775 INFO [StoreOpener-b244849f410331d24fc6fa8bfa1098bf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b244849f410331d24fc6fa8bfa1098bf columnFamilyName cf 2024-11-13T16:32:33,785 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for cd566cdf4cddf6c6aa17b382a0748fbc 2024-11-13T16:32:33,787 DEBUG [StoreOpener-b244849f410331d24fc6fa8bfa1098bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:32:33,792 INFO [StoreOpener-b244849f410331d24fc6fa8bfa1098bf-1 {}] regionserver.HStore(327): Store=b244849f410331d24fc6fa8bfa1098bf/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:32:33,793 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for b244849f410331d24fc6fa8bfa1098bf 2024-11-13T16:32:33,801 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/b244849f410331d24fc6fa8bfa1098bf 2024-11-13T16:32:33,804 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/b244849f410331d24fc6fa8bfa1098bf 2024-11-13T16:32:33,810 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for b244849f410331d24fc6fa8bfa1098bf 2024-11-13T16:32:33,810 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for b244849f410331d24fc6fa8bfa1098bf 2024-11-13T16:32:33,812 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/cd566cdf4cddf6c6aa17b382a0748fbc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:32:33,813 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened cd566cdf4cddf6c6aa17b382a0748fbc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73160814, jitterRate=0.09018108248710632}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:32:33,814 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cd566cdf4cddf6c6aa17b382a0748fbc 2024-11-13T16:32:33,816 DEBUG 
[RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for cd566cdf4cddf6c6aa17b382a0748fbc: Running coprocessor pre-open hook at 1731515553663Writing region info on filesystem at 1731515553663Initializing all the Stores at 1731515553677 (+14 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515553677Cleaning up temporary data from old regions at 1731515553771 (+94 ms)Running coprocessor post-open hooks at 1731515553814 (+43 ms)Region opened successfully at 1731515553815 (+1 ms) 2024-11-13T16:32:33,818 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for b244849f410331d24fc6fa8bfa1098bf 2024-11-13T16:32:33,825 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc., pid=11, masterSystemTime=1731515553611 2024-11-13T16:32:33,837 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc. 2024-11-13T16:32:33,837 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc. 
2024-11-13T16:32:33,838 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=cd566cdf4cddf6c6aa17b382a0748fbc, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:32:33,838 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/b244849f410331d24fc6fa8bfa1098bf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:32:33,840 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened b244849f410331d24fc6fa8bfa1098bf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68164039, jitterRate=0.0157233327627182}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:32:33,840 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b244849f410331d24fc6fa8bfa1098bf 2024-11-13T16:32:33,840 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for b244849f410331d24fc6fa8bfa1098bf: Running coprocessor pre-open hook at 1731515553744Writing region info on filesystem at 1731515553744Initializing all the Stores at 1731515553754 (+10 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515553754Cleaning up temporary data from old regions at 1731515553810 (+56 ms)Running coprocessor post-open hooks at 1731515553840 (+30 ms)Region opened successfully at 1731515553840 2024-11-13T16:32:33,850 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf., pid=10, masterSystemTime=1731515553611 2024-11-13T16:32:33,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure cd566cdf4cddf6c6aa17b382a0748fbc, server=f8f85b0bb1d8,38523,1731515529292 because future has completed 2024-11-13T16:32:33,871 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf. 2024-11-13T16:32:33,871 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf. 
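[Note] The "Region open journal" entries above print the column family schema the regions were created with (VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 65536, IS_MOB => 'true', MOB_THRESHOLD => '0'). A minimal sketch of building an equivalent descriptor with the public client API, assuming those logged attributes and not the test's actual helper code:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FamilySchemaSketch {
      public static ColumnFamilyDescriptor cf() {
        // Mirrors the attributes echoed in the open journal above.
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)                 // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
            .setBlocksize(64 * 1024)           // BLOCKSIZE => '65536'
            .setMobEnabled(true)               // IS_MOB => 'true'
            .setMobThreshold(0L)               // MOB_THRESHOLD => '0'
            .build();
      }
    }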
2024-11-13T16:32:33,880 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=b244849f410331d24fc6fa8bfa1098bf, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:32:33,884 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=8 2024-11-13T16:32:33,884 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure cd566cdf4cddf6c6aa17b382a0748fbc, server=f8f85b0bb1d8,38523,1731515529292 in 417 msec 2024-11-13T16:32:33,893 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45809 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=f8f85b0bb1d8,39619,1731515529105, table=testtb-testExportWithTargetName, region=b244849f410331d24fc6fa8bfa1098bf. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-13T16:32:33,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure b244849f410331d24fc6fa8bfa1098bf, server=f8f85b0bb1d8,39619,1731515529105 because future has completed 2024-11-13T16:32:33,912 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=cd566cdf4cddf6c6aa17b382a0748fbc, ASSIGN in 660 msec 2024-11-13T16:32:33,945 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-13T16:32:33,945 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure b244849f410331d24fc6fa8bfa1098bf, server=f8f85b0bb1d8,39619,1731515529105 in 495 msec 2024-11-13T16:32:33,971 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-11-13T16:32:33,971 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b244849f410331d24fc6fa8bfa1098bf, ASSIGN in 721 msec 2024-11-13T16:32:33,979 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T16:32:33,980 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515553979"}]},"ts":"1731515553979"} 2024-11-13T16:32:33,989 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-11-13T16:32:33,998 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T16:32:34,005 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-11-13T16:32:34,028 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The 
fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2] 2024-11-13T16:32:34,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:32:34,033 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54699, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:32:34,047 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38523 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:32:34,048 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38523 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:32:34,048 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38523 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:32:34,050 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37969, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-11-13T16:32:34,061 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2] 2024-11-13T16:32:34,061 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:32:34,063 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57439, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-11-13T16:32:34,067 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38523 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-13T16:32:34,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-13T16:32:34,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-13T16:32:34,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:34,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:34,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-13T16:32:34,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:34,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-13T16:32:34,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:32:34,225 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-13T16:32:34,225 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-13T16:32:34,226 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-13T16:32:34,235 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-13T16:32:34,256 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 1.9290 sec 2024-11-13T16:32:34,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-13T16:32:34,551 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-13T16:32:34,556 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-13T16:32:34,568 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-11-13T16:32:34,570 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc. 
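[Note] At this point CreateTableProcedure pid=7 has finished, the table is ENABLED in hbase:meta, and the AccessController has written the owner grant (jenkins: RWXCA) that the ZKPermissionWatcher entries above propagate. The test drives this through HBaseTestingUtil, but from a plain client the same pre-split table creation would look roughly like the sketch below (region boundaries taken from the log: split key "1" gives the two regions ''..'1' and '1'..''):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("testtb-testExportWithTargetName");
        byte[][] splitKeys = { Bytes.toBytes("1") }; // yields the two regions seen above
        TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
            // The test's actual family also carries the MOB attributes shown in the open journal.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .build();
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          // Blocks until the CreateTableProcedure reports SUCCESS; with the AccessController
          // coprocessor loaded, the creating user is granted RWXCA on the new table automatically.
          admin.createTable(desc, splitKeys);
        }
      }
    }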
2024-11-13T16:32:34,571 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T16:32:34,585 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-13T16:32:34,656 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-13T16:32:34,672 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:32:34,683 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37798, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:32:34,699 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-13T16:32:34,740 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-13T16:32:34,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731515554740 (current time:1731515554740). 2024-11-13T16:32:34,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T16:32:34,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-13T16:32:34,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T16:32:34,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3060bd08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:32:34,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:32:34,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:32:34,774 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:32:34,774 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:32:34,775 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: 
"189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:32:34,775 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43a8ed94, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:32:34,775 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:32:34,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:32:34,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:32:34,778 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40280, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:32:34,784 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@564cee70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:32:34,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:32:34,796 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:32:34,796 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:32:34,800 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52782, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:32:34,805 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809. 
2024-11-13T16:32:34,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:32:34,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:32:34,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:32:34,835 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:32:34,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bf1b555, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:32:34,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:32:34,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:32:34,855 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:32:34,856 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:32:34,857 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:32:34,857 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a2234e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:32:34,857 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:32:34,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:32:34,860 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:32:34,862 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40288, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:32:34,870 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e85aefb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:32:34,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:32:34,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:32:34,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:32:34,883 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52788, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:32:34,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2] 2024-11-13T16:32:34,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:32:34,913 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37806, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:32:34,925 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809. 
2024-11-13T16:32:34,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:32:34,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:32:34,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:32:34,926 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:32:34,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-13T16:32:34,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
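[Note] The master has now validated the snapshot request (including the ACL check whose connection teardown produced the call stack above) and is about to run SnapshotProcedure pid=12. On the client side, a request like the one logged at MasterRpcServices(1763) is issued with the Admin API; a minimal sketch, using the table and snapshot names from the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          // Issues a FLUSH-type snapshot (the default for this overload) and blocks,
          // polling the master ("Checking to see if procedure is done pid=12") until it completes.
          admin.snapshot("emptySnaptb0-testExportWithTargetName",
              TableName.valueOf("testtb-testExportWithTargetName"));
        }
      }
    }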
2024-11-13T16:32:34,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-13T16:32:34,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-13T16:32:34,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-13T16:32:34,999 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T16:32:35,027 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T16:32:35,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-13T16:32:35,100 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T16:32:35,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-13T16:32:35,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741845_1021 (size=167) 2024-11-13T16:32:35,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741845_1021 (size=167) 2024-11-13T16:32:35,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741845_1021 (size=167) 2024-11-13T16:32:35,432 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T16:32:35,439 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cd566cdf4cddf6c6aa17b382a0748fbc}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b244849f410331d24fc6fa8bfa1098bf}] 2024-11-13T16:32:35,449 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b244849f410331d24fc6fa8bfa1098bf 2024-11-13T16:32:35,451 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cd566cdf4cddf6c6aa17b382a0748fbc 2024-11-13T16:32:35,627 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38523 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-11-13T16:32:35,632 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-11-13T16:32:35,635 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc. 2024-11-13T16:32:35,638 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf. 2024-11-13T16:32:35,665 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for cd566cdf4cddf6c6aa17b382a0748fbc: 2024-11-13T16:32:35,665 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc. for emptySnaptb0-testExportWithTargetName completed. 2024-11-13T16:32:35,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-13T16:32:35,671 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for b244849f410331d24fc6fa8bfa1098bf: 2024-11-13T16:32:35,671 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf. for emptySnaptb0-testExportWithTargetName completed. 2024-11-13T16:32:35,677 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-13T16:32:35,678 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-13T16:32:35,690 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:32:35,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:32:35,703 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T16:32:35,703 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T16:32:35,903 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T16:32:35,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741846_1022 (size=70) 2024-11-13T16:32:35,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741846_1022 (size=70) 2024-11-13T16:32:35,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741846_1022 (size=70) 2024-11-13T16:32:35,965 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc. 2024-11-13T16:32:35,968 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-13T16:32:35,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741847_1023 (size=70) 2024-11-13T16:32:35,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-11-13T16:32:35,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741847_1023 (size=70) 2024-11-13T16:32:35,980 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region cd566cdf4cddf6c6aa17b382a0748fbc 2024-11-13T16:32:35,980 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cd566cdf4cddf6c6aa17b382a0748fbc 2024-11-13T16:32:35,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741847_1023 (size=70) 2024-11-13T16:32:35,991 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf. 
2024-11-13T16:32:35,991 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-13T16:32:35,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-13T16:32:35,998 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region b244849f410331d24fc6fa8bfa1098bf 2024-11-13T16:32:36,003 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b244849f410331d24fc6fa8bfa1098bf 2024-11-13T16:32:36,010 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure cd566cdf4cddf6c6aa17b382a0748fbc in 553 msec 2024-11-13T16:32:36,024 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T16:32:36,029 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=12 2024-11-13T16:32:36,030 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b244849f410331d24fc6fa8bfa1098bf in 573 msec 2024-11-13T16:32:36,030 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T16:32:36,040 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-13T16:32:36,040 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T16:32:36,041 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:32:36,044 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-13T16:32:36,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741848_1024 (size=62) 2024-11-13T16:32:36,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741848_1024 (size=62) 2024-11-13T16:32:36,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741848_1024 (size=62) 2024-11-13T16:32:36,169 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T16:32:36,169 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-11-13T16:32:36,176 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-11-13T16:32:36,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-13T16:32:36,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741849_1025 (size=649) 2024-11-13T16:32:36,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741849_1025 (size=649) 2024-11-13T16:32:36,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741849_1025 (size=649) 2024-11-13T16:32:36,354 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T16:32:36,411 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T16:32:36,419 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-11-13T16:32:36,439 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T16:32:36,440 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-13T16:32:36,455 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 1.4800 sec 2024-11-13T16:32:37,076 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-11-13T16:32:37,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-13T16:32:37,191 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-13T16:32:37,226 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38523 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T16:32:37,238 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39619 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T16:32:37,254 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-13T16:32:37,262 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-11-13T16:32:37,262 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc. 
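[Note] The HRegion(8528) warnings above ("writing data ... with WAL disabled. Data may be lost in the event of a crash.") are what the region server logs when a client writes with WAL skipped. A minimal client-side sketch that would trigger that path; the row key, qualifier, and value below are hypothetical placeholders, not values taken from this test:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("testtb-testExportWithTargetName"))) {
          Put put = new Put(Bytes.toBytes("row-0"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL); // skips the WAL, producing the warning logged above
          table.put(put);
        }
      }
    }

Skipping the WAL trades durability for write throughput, which is why the server logs it at INFO rather than silently accepting it.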
2024-11-13T16:32:37,262 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T16:32:37,266 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-13T16:32:37,294 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-13T16:32:37,314 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-13T16:32:37,328 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-13T16:32:37,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731515557328 (current time:1731515557328). 2024-11-13T16:32:37,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T16:32:37,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-13T16:32:37,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T16:32:37,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bca5d12, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:32:37,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:32:37,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:32:37,333 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:32:37,333 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:32:37,334 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:32:37,334 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59b534e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=null 2024-11-13T16:32:37,334 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:32:37,335 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:32:37,338 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40320, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:32:37,339 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:32:37,340 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1177839d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:32:37,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:32:37,344 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:32:37,345 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:32:37,350 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52792, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:32:37,357 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809. 
2024-11-13T16:32:37,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:32:37,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:32:37,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:32:37,360 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:32:37,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1095b1f9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:32:37,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:32:37,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:32:37,371 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:32:37,372 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:32:37,372 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:32:37,373 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51e384fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:32:37,373 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:32:37,373 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:32:37,374 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:32:37,377 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40340, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:32:37,382 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d337272, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:32:37,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:32:37,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:32:37,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:32:37,399 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52806, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:32:37,404 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2] 2024-11-13T16:32:37,405 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:32:37,420 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37814, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:32:37,423 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809. 
2024-11-13T16:32:37,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:32:37,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:32:37,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:32:37,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-13T16:32:37,426 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:32:37,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
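[Editor's note] The span above shows the client's snapshot request reaching MasterRpcServices.snapshot, the table's ACL entry being copied into the snapshot description, and the master deciding to attempt the snapshot; the SnapshotProcedure (pid=15) is stored immediately after. The test drives this through its own utilities, but a minimal Admin-API sketch of the equivalent request (snapshot and table names taken from the log) would be:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot of an enabled table; blocks until the
      // master-side SnapshotProcedure (pid=15 in this run) completes.
      admin.snapshot("snaptb0-testExportWithTargetName",
          TableName.valueOf("testtb-testExportWithTargetName"));
    }
  }
}

Admin.snapshot blocks until the snapshot is done; the repeated "Checking to see if procedure is done pid=15" entries below are that client-side polling.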
2024-11-13T16:32:37,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-13T16:32:37,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-13T16:32:37,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-13T16:32:37,459 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T16:32:37,465 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T16:32:37,496 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T16:32:37,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-13T16:32:37,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741850_1026 (size=162) 2024-11-13T16:32:37,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741850_1026 (size=162) 2024-11-13T16:32:37,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741850_1026 (size=162) 2024-11-13T16:32:37,595 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T16:32:37,604 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cd566cdf4cddf6c6aa17b382a0748fbc}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b244849f410331d24fc6fa8bfa1098bf}] 2024-11-13T16:32:37,616 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b244849f410331d24fc6fa8bfa1098bf 2024-11-13T16:32:37,617 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cd566cdf4cddf6c6aa17b382a0748fbc 2024-11-13T16:32:37,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-13T16:32:37,713 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-11-13T16:32:37,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-13T16:32:37,775 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-11-13T16:32:37,779 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38523 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-11-13T16:32:37,783 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf. 2024-11-13T16:32:37,792 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc. 2024-11-13T16:32:37,799 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing b244849f410331d24fc6fa8bfa1098bf 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-11-13T16:32:37,800 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing cd566cdf4cddf6c6aa17b382a0748fbc 1/1 column families, dataSize=333 B heapSize=976 B 2024-11-13T16:32:38,019 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411138a7f119c85324f0ea3bb4990bce0efbb_cd566cdf4cddf6c6aa17b382a0748fbc is 71, key is 01fa41d8c36170fe5e66f6d356f52dc9/cf:q/1731515557226/Put/seqid=0 2024-11-13T16:32:38,021 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411138e8c528b971141cf8a1314cc90391f0e_b244849f410331d24fc6fa8bfa1098bf is 71, key is 11012cf1e8515e2b3b39e7a64786b6c1/cf:q/1731515557234/Put/seqid=0 2024-11-13T16:32:38,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-13T16:32:38,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741852_1028 (size=5241) 2024-11-13T16:32:38,138 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741852_1028 (size=5241) 2024-11-13T16:32:38,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741852_1028 (size=5241) 2024-11-13T16:32:38,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741851_1027 (size=8031) 2024-11-13T16:32:38,143 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:32:38,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741851_1027 (size=8031) 2024-11-13T16:32:38,154 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:32:38,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741851_1027 (size=8031) 2024-11-13T16:32:38,354 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411138a7f119c85324f0ea3bb4990bce0efbb_cd566cdf4cddf6c6aa17b382a0748fbc to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202411138a7f119c85324f0ea3bb4990bce0efbb_cd566cdf4cddf6c6aa17b382a0748fbc 2024-11-13T16:32:38,359 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411138e8c528b971141cf8a1314cc90391f0e_b244849f410331d24fc6fa8bfa1098bf to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202411138e8c528b971141cf8a1314cc90391f0e_b244849f410331d24fc6fa8bfa1098bf 2024-11-13T16:32:38,360 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/cd566cdf4cddf6c6aa17b382a0748fbc/.tmp/cf/e364eb1dbf7d451b8397e43d98fcc0cc, store: [table=testtb-testExportWithTargetName family=cf region=cd566cdf4cddf6c6aa17b382a0748fbc] 2024-11-13T16:32:38,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/b244849f410331d24fc6fa8bfa1098bf/.tmp/cf/92cc9fa595064949859461ed8174e579, store: [table=testtb-testExportWithTargetName family=cf region=b244849f410331d24fc6fa8bfa1098bf] 2024-11-13T16:32:38,383 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/b244849f410331d24fc6fa8bfa1098bf/.tmp/cf/92cc9fa595064949859461ed8174e579 is 208, key is 195f8542b1e570c084343c67792602701/cf:q/1731515557234/Put/seqid=0 2024-11-13T16:32:38,397 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/cd566cdf4cddf6c6aa17b382a0748fbc/.tmp/cf/e364eb1dbf7d451b8397e43d98fcc0cc is 208, key is 0a39d51d7a71c30b4a40b39f82c47b904/cf:q/1731515557226/Put/seqid=0 2024-11-13T16:32:38,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741853_1029 (size=14541) 2024-11-13T16:32:38,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741853_1029 (size=14541) 2024-11-13T16:32:38,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741853_1029 (size=14541) 2024-11-13T16:32:38,465 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/b244849f410331d24fc6fa8bfa1098bf/.tmp/cf/92cc9fa595064949859461ed8174e579 2024-11-13T16:32:38,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741854_1030 (size=6320) 2024-11-13T16:32:38,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741854_1030 (size=6320) 2024-11-13T16:32:38,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741854_1030 (size=6320) 2024-11-13T16:32:38,490 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=333, hasBloomFilter=true, into tmp file hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/cd566cdf4cddf6c6aa17b382a0748fbc/.tmp/cf/e364eb1dbf7d451b8397e43d98fcc0cc 2024-11-13T16:32:38,533 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/b244849f410331d24fc6fa8bfa1098bf/.tmp/cf/92cc9fa595064949859461ed8174e579 as 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/b244849f410331d24fc6fa8bfa1098bf/cf/92cc9fa595064949859461ed8174e579 2024-11-13T16:32:38,547 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/cd566cdf4cddf6c6aa17b382a0748fbc/.tmp/cf/e364eb1dbf7d451b8397e43d98fcc0cc as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/cd566cdf4cddf6c6aa17b382a0748fbc/cf/e364eb1dbf7d451b8397e43d98fcc0cc 2024-11-13T16:32:38,584 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/b244849f410331d24fc6fa8bfa1098bf/cf/92cc9fa595064949859461ed8174e579, entries=45, sequenceid=6, filesize=14.2 K 2024-11-13T16:32:38,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-13T16:32:38,609 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/cd566cdf4cddf6c6aa17b382a0748fbc/cf/e364eb1dbf7d451b8397e43d98fcc0cc, entries=5, sequenceid=6, filesize=6.2 K 2024-11-13T16:32:38,618 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for cd566cdf4cddf6c6aa17b382a0748fbc in 820ms, sequenceid=6, compaction requested=false 2024-11-13T16:32:38,618 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for cd566cdf4cddf6c6aa17b382a0748fbc: 2024-11-13T16:32:38,621 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc. for snaptb0-testExportWithTargetName completed. 2024-11-13T16:32:38,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-13T16:32:38,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:32:38,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/cd566cdf4cddf6c6aa17b382a0748fbc/cf/e364eb1dbf7d451b8397e43d98fcc0cc] hfiles 2024-11-13T16:32:38,623 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for b244849f410331d24fc6fa8bfa1098bf in 821ms, sequenceid=6, compaction requested=false 2024-11-13T16:32:38,624 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for b244849f410331d24fc6fa8bfa1098bf: 2024-11-13T16:32:38,624 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf. for snaptb0-testExportWithTargetName completed. 2024-11-13T16:32:38,624 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-13T16:32:38,624 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:32:38,625 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/b244849f410331d24fc6fa8bfa1098bf/cf/92cc9fa595064949859461ed8174e579] hfiles 2024-11-13T16:32:38,626 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/cd566cdf4cddf6c6aa17b382a0748fbc/cf/e364eb1dbf7d451b8397e43d98fcc0cc for snapshot=snaptb0-testExportWithTargetName 2024-11-13T16:32:38,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/b244849f410331d24fc6fa8bfa1098bf/cf/92cc9fa595064949859461ed8174e579 for snapshot=snaptb0-testExportWithTargetName 2024-11-13T16:32:38,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741855_1031 (size=109) 2024-11-13T16:32:38,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46053 is added to blk_1073741855_1031 (size=109) 2024-11-13T16:32:38,846 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc. 2024-11-13T16:32:38,846 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-11-13T16:32:38,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-11-13T16:32:38,851 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region cd566cdf4cddf6c6aa17b382a0748fbc 2024-11-13T16:32:38,852 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cd566cdf4cddf6c6aa17b382a0748fbc 2024-11-13T16:32:38,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741855_1031 (size=109) 2024-11-13T16:32:38,877 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure cd566cdf4cddf6c6aa17b382a0748fbc in 1.2670 sec 2024-11-13T16:32:38,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741856_1032 (size=109) 2024-11-13T16:32:38,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741856_1032 (size=109) 2024-11-13T16:32:38,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741856_1032 (size=109) 2024-11-13T16:32:38,909 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf. 
2024-11-13T16:32:38,909 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-13T16:32:38,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-11-13T16:32:38,911 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region b244849f410331d24fc6fa8bfa1098bf 2024-11-13T16:32:38,911 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b244849f410331d24fc6fa8bfa1098bf 2024-11-13T16:32:38,928 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=15 2024-11-13T16:32:38,928 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b244849f410331d24fc6fa8bfa1098bf in 1.3210 sec 2024-11-13T16:32:38,928 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T16:32:38,931 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T16:32:38,943 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
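[Editor's note] In the entries that follow, the MOB region manifest is written, the snapshot is consolidated, verified, and completed, and the test then begins the export phase ("HDFS export destination path ..."). A hedged sketch of driving the same ExportSnapshot tool programmatically, assuming its usual Tool interface and the commonly documented -snapshot/-copy-to/-mappers options; -target (export under a different name) is assumed here from the ".tmp/testExportWithTargetName" output directory in the log, and the destination path is a placeholder rather than the test's actual export directory:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Roughly equivalent to:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot snaptb0-testExportWithTargetName -target testExportWithTargetName \
    //     -copy-to hdfs://namenode:8020/hbase-export -mappers 1
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithTargetName",
        "-target", "testExportWithTargetName",            // assumed option: rename snapshot at the destination
        "-copy-to", "hdfs://namenode:8020/hbase-export",  // placeholder destination, not the test's path
        "-mappers", "1"
    });
    System.exit(rc);
  }
}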
2024-11-13T16:32:38,943 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T16:32:38,944 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:32:38,954 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202411138e8c528b971141cf8a1314cc90391f0e_b244849f410331d24fc6fa8bfa1098bf, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202411138a7f119c85324f0ea3bb4990bce0efbb_cd566cdf4cddf6c6aa17b382a0748fbc] hfiles 2024-11-13T16:32:38,954 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202411138e8c528b971141cf8a1314cc90391f0e_b244849f410331d24fc6fa8bfa1098bf 2024-11-13T16:32:38,954 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202411138a7f119c85324f0ea3bb4990bce0efbb_cd566cdf4cddf6c6aa17b382a0748fbc 2024-11-13T16:32:39,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741857_1033 (size=293) 2024-11-13T16:32:39,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741857_1033 (size=293) 2024-11-13T16:32:39,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741857_1033 (size=293) 2024-11-13T16:32:39,102 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T16:32:39,102 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-11-13T16:32:39,108 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-11-13T16:32:39,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741858_1034 (size=959) 2024-11-13T16:32:39,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741858_1034 (size=959) 2024-11-13T16:32:39,242 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741858_1034 (size=959) 2024-11-13T16:32:39,280 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T16:32:39,324 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T16:32:39,347 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T16:32:39,350 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-13T16:32:39,356 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T16:32:39,357 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-13T16:32:39,363 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 1.9230 sec 2024-11-13T16:32:39,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-13T16:32:39,631 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-13T16:32:39,632 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515559631 2024-11-13T16:32:39,632 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:37215, tgtDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515559631, rawTgtDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515559631, srcFsUri=hdfs://localhost:37215, srcDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:32:39,757 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:37215, 
inputRoot=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:32:39,757 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-315877880_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515559631, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515559631/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-13T16:32:39,779 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-13T16:32:39,879 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515559631/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-13T16:32:40,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741859_1035 (size=162) 2024-11-13T16:32:40,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741859_1035 (size=162) 2024-11-13T16:32:40,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741859_1035 (size=162) 2024-11-13T16:32:40,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741860_1036 (size=959) 2024-11-13T16:32:40,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741860_1036 (size=959) 2024-11-13T16:32:40,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741860_1036 (size=959) 2024-11-13T16:32:40,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741861_1037 (size=154) 2024-11-13T16:32:40,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741861_1037 (size=154) 2024-11-13T16:32:40,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741861_1037 (size=154) 2024-11-13T16:32:40,202 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:32:40,203 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:32:40,204 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:32:42,729 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop-4699205334223742098.jar 2024-11-13T16:32:42,730 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:32:42,731 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:32:42,941 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop-4036980964126531711.jar 2024-11-13T16:32:42,942 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:32:42,950 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:32:42,950 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:32:42,951 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:32:42,956 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:32:42,957 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:32:42,958 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-13T16:32:42,959 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-13T16:32:42,965 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-13T16:32:42,966 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-13T16:32:42,966 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-13T16:32:42,973 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-13T16:32:42,974 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-13T16:32:42,975 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-13T16:32:42,975 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-13T16:32:42,976 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-13T16:32:42,977 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-13T16:32:42,994 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:32:42,999 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:32:43,000 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:32:43,015 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:32:43,016 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:32:43,017 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:32:43,017 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:32:43,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741862_1038 (size=131440) 2024-11-13T16:32:43,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741862_1038 (size=131440) 2024-11-13T16:32:43,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741862_1038 (size=131440) 2024-11-13T16:32:43,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741863_1039 (size=4188619) 2024-11-13T16:32:43,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741863_1039 (size=4188619) 2024-11-13T16:32:43,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741863_1039 (size=4188619) 2024-11-13T16:32:44,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741864_1040 (size=1323991) 2024-11-13T16:32:44,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741864_1040 (size=1323991) 2024-11-13T16:32:44,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741864_1040 (size=1323991) 2024-11-13T16:32:44,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741865_1041 (size=440392) 2024-11-13T16:32:44,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741865_1041 (size=440392) 2024-11-13T16:32:44,788 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741865_1041 (size=440392) 2024-11-13T16:32:45,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741866_1042 (size=903731) 2024-11-13T16:32:45,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741866_1042 (size=903731) 2024-11-13T16:32:45,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741866_1042 (size=903731) 2024-11-13T16:32:45,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741867_1043 (size=8360083) 2024-11-13T16:32:45,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741867_1043 (size=8360083) 2024-11-13T16:32:45,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741867_1043 (size=8360083) 2024-11-13T16:32:45,961 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-315877880_22 at /127.0.0.1:53254 [Receiving block BP-1731676653-172.17.0.3-1731515519385:blk_1073741868_1044] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 437ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data6/, blockId=1073741868, seqno=0 2024-11-13T16:32:45,962 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-315877880_22 at /127.0.0.1:40226 [Receiving block BP-1731676653-172.17.0.3-1731515519385:blk_1073741868_1044] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 439ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data4/, blockId=1073741868, seqno=0 2024-11-13T16:32:45,962 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-315877880_22 at /127.0.0.1:35850 [Receiving block BP-1731676653-172.17.0.3-1731515519385:blk_1073741868_1044] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 434ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data2/, blockId=1073741868, seqno=0 2024-11-13T16:32:45,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741868_1044 (size=1877034) 2024-11-13T16:32:45,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741868_1044 (size=1877034) 2024-11-13T16:32:45,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741868_1044 (size=1877034) 2024-11-13T16:32:46,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741869_1045 (size=77835) 2024-11-13T16:32:46,008 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741869_1045 (size=77835) 2024-11-13T16:32:46,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741869_1045 (size=77835) 2024-11-13T16:32:46,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741870_1046 (size=6424544) 2024-11-13T16:32:46,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741870_1046 (size=6424544) 2024-11-13T16:32:46,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741870_1046 (size=6424544) 2024-11-13T16:32:46,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741871_1047 (size=30949) 2024-11-13T16:32:46,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741871_1047 (size=30949) 2024-11-13T16:32:46,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741871_1047 (size=30949) 2024-11-13T16:32:46,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741872_1048 (size=1597327) 2024-11-13T16:32:46,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741872_1048 (size=1597327) 2024-11-13T16:32:46,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741872_1048 (size=1597327) 2024-11-13T16:32:46,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741873_1049 (size=4695811) 2024-11-13T16:32:46,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741873_1049 (size=4695811) 2024-11-13T16:32:46,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741873_1049 (size=4695811) 2024-11-13T16:32:46,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741874_1050 (size=232957) 2024-11-13T16:32:46,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741874_1050 (size=232957) 2024-11-13T16:32:46,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741874_1050 (size=232957) 2024-11-13T16:32:46,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741875_1051 (size=127628) 2024-11-13T16:32:46,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741875_1051 (size=127628) 2024-11-13T16:32:46,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741875_1051 (size=127628) 2024-11-13T16:32:46,614 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741876_1052 (size=20406) 2024-11-13T16:32:46,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741876_1052 (size=20406) 2024-11-13T16:32:46,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741876_1052 (size=20406) 2024-11-13T16:32:46,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741877_1053 (size=5175431) 2024-11-13T16:32:46,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741877_1053 (size=5175431) 2024-11-13T16:32:46,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741877_1053 (size=5175431) 2024-11-13T16:32:47,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741878_1054 (size=217634) 2024-11-13T16:32:47,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741878_1054 (size=217634) 2024-11-13T16:32:47,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741878_1054 (size=217634) 2024-11-13T16:32:47,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741879_1055 (size=1832290) 2024-11-13T16:32:47,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741879_1055 (size=1832290) 2024-11-13T16:32:47,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741879_1055 (size=1832290) 2024-11-13T16:32:47,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741880_1056 (size=322274) 2024-11-13T16:32:47,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741880_1056 (size=322274) 2024-11-13T16:32:47,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741880_1056 (size=322274) 2024-11-13T16:32:47,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741881_1057 (size=503880) 2024-11-13T16:32:47,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741881_1057 (size=503880) 2024-11-13T16:32:47,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741881_1057 (size=503880) 2024-11-13T16:32:47,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741882_1058 (size=29229) 2024-11-13T16:32:47,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741882_1058 (size=29229) 2024-11-13T16:32:47,597 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741882_1058 (size=29229) 2024-11-13T16:32:47,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741883_1059 (size=24096) 2024-11-13T16:32:47,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741883_1059 (size=24096) 2024-11-13T16:32:47,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741883_1059 (size=24096) 2024-11-13T16:32:47,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741884_1060 (size=111872) 2024-11-13T16:32:47,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741884_1060 (size=111872) 2024-11-13T16:32:47,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741884_1060 (size=111872) 2024-11-13T16:32:47,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741885_1061 (size=45609) 2024-11-13T16:32:47,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741885_1061 (size=45609) 2024-11-13T16:32:47,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741885_1061 (size=45609) 2024-11-13T16:32:48,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741886_1062 (size=136454) 2024-11-13T16:32:48,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741886_1062 (size=136454) 2024-11-13T16:32:48,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741886_1062 (size=136454) 2024-11-13T16:32:48,060 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
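The DEBUG lines above show TableMapReduceUtil resolving one jar per dependency class before the export job is submitted, and the JobResourceUploader warning notes that no job jar was set. The following is a minimal sketch, not taken from this test's source, of how a job driver usually wires this up; the class and job names are placeholders.

```java
// Sketch only: ships the HBase/ZooKeeper/metrics jars resolved in the DEBUG lines
// above via the distributed cache, and sets a job jar so the JobResourceUploader
// "No job jar file set" warning does not appear.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class ExportJobDriver {               // placeholder class name
  public static Job createJob(Configuration conf) throws Exception {
    Job job = Job.getInstance(HBaseConfiguration.create(conf), "snapshot-export");
    // Point the job jar at a class from the user's own jar.
    job.setJarByClass(ExportJobDriver.class);
    // Add the dependency jars (hbase-shaded-*, zookeeper, metrics-core, opentelemetry, ...)
    // to the job's classpath, as logged by TableMapReduceUtil above.
    TableMapReduceUtil.addDependencyJars(job);
    return job;
  }
}
```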
2024-11-13T16:32:48,087 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-11-13T16:32:48,112 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=33.3 K 2024-11-13T16:32:48,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741887_1063 (size=722) 2024-11-13T16:32:48,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741887_1063 (size=722) 2024-11-13T16:32:48,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741887_1063 (size=722) 2024-11-13T16:32:48,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741888_1064 (size=15) 2024-11-13T16:32:48,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741888_1064 (size=15) 2024-11-13T16:32:48,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741888_1064 (size=15) 2024-11-13T16:32:48,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741889_1065 (size=303731) 2024-11-13T16:32:48,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741889_1065 (size=303731) 2024-11-13T16:32:48,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741889_1065 (size=303731) 2024-11-13T16:32:49,638 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T16:32:49,639 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T16:32:50,566 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0001_000001 (auth:SIMPLE) from 127.0.0.1:58576 2024-11-13T16:32:58,361 INFO [master/f8f85b0bb1d8:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-13T16:32:58,361 INFO [master/f8f85b0bb1d8:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-13T16:33:05,904 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
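At this point ExportSnapshot has loaded the hfile list for 'snaptb0-testExportWithTargetName' and planned a single split before the MapReduce job runs on the mini YARN cluster. Below is a rough sketch of how such an export with a renamed target is typically driven; the destination URI is a placeholder and the actual test wiring is not shown in this log.

```java
// Sketch only: drive the ExportSnapshot tool programmatically with a target name,
// matching the source snapshot and exported name seen in the log above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExport {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithTargetName",  // source snapshot
        "-copy-to", "hdfs://namenode:8020/export-test",   // placeholder destination
        "-target", "testExportWithTargetName"             // name of the exported snapshot
    });
    System.exit(rc);
  }
}
```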
2024-11-13T16:33:12,131 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0001_000001 (auth:SIMPLE) from 127.0.0.1:55648 2024-11-13T16:33:13,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741890_1066 (size=349381) 2024-11-13T16:33:13,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741890_1066 (size=349381) 2024-11-13T16:33:13,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741890_1066 (size=349381) 2024-11-13T16:33:14,984 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0001_000001 (auth:SIMPLE) from 127.0.0.1:44826 2024-11-13T16:33:15,959 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 618c30cdb779cfeb9cea6e5be1e51395 changed from -1.0 to 0.0, refreshing cache 2024-11-13T16:33:15,963 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region cd566cdf4cddf6c6aa17b382a0748fbc changed from -1.0 to 0.0, refreshing cache 2024-11-13T16:33:15,977 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region b244849f410331d24fc6fa8bfa1098bf changed from -1.0 to 0.0, refreshing cache 2024-11-13T16:33:18,663 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region cd566cdf4cddf6c6aa17b382a0748fbc, had cached 0 bytes from a total of 6320 2024-11-13T16:33:18,743 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region b244849f410331d24fc6fa8bfa1098bf, had cached 0 bytes from a total of 14541 2024-11-13T16:33:30,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741891_1067 (size=14541) 2024-11-13T16:33:30,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741891_1067 (size=14541) 2024-11-13T16:33:30,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741891_1067 (size=14541) 2024-11-13T16:33:30,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741892_1068 (size=8031) 2024-11-13T16:33:30,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741892_1068 (size=8031) 2024-11-13T16:33:30,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741892_1068 (size=8031) 2024-11-13T16:33:30,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741893_1069 (size=6320) 2024-11-13T16:33:30,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741893_1069 (size=6320) 2024-11-13T16:33:30,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741893_1069 (size=6320) 2024-11-13T16:33:30,670 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741894_1070 (size=5241) 2024-11-13T16:33:30,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741894_1070 (size=5241) 2024-11-13T16:33:30,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741894_1070 (size=5241) 2024-11-13T16:33:30,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741895_1071 (size=17465) 2024-11-13T16:33:30,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741895_1071 (size=17465) 2024-11-13T16:33:30,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741895_1071 (size=17465) 2024-11-13T16:33:30,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741896_1072 (size=465) 2024-11-13T16:33:30,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741896_1072 (size=465) 2024-11-13T16:33:30,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741896_1072 (size=465) 2024-11-13T16:33:31,109 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_3/usercache/jenkins/appcache/application_1731515545832_0001/container_1731515545832_0001_01_000002/launch_container.sh] 2024-11-13T16:33:31,109 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_3/usercache/jenkins/appcache/application_1731515545832_0001/container_1731515545832_0001_01_000002/container_tokens] 2024-11-13T16:33:31,109 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_3/usercache/jenkins/appcache/application_1731515545832_0001/container_1731515545832_0001_01_000002/sysfs] 2024-11-13T16:33:31,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741897_1073 (size=17465) 2024-11-13T16:33:31,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741897_1073 (size=17465) 2024-11-13T16:33:31,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741897_1073 (size=17465) 2024-11-13T16:33:31,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741898_1074 (size=349381) 2024-11-13T16:33:31,347 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741898_1074 (size=349381) 2024-11-13T16:33:31,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741898_1074 (size=349381) 2024-11-13T16:33:31,407 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0001_000001 (auth:SIMPLE) from 127.0.0.1:39100 2024-11-13T16:33:33,467 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-13T16:33:33,483 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-13T16:33:33,541 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: testExportWithTargetName 2024-11-13T16:33:33,541 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-13T16:33:33,547 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-13T16:33:33,547 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-315877880_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-13T16:33:33,549 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-11-13T16:33:33,549 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-11-13T16:33:33,549 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-315877880_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515559631/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515559631/.hbase-snapshot/testExportWithTargetName 2024-11-13T16:33:33,550 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515559631/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-11-13T16:33:33,550 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515559631/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-11-13T16:33:33,588 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportWithTargetName 2024-11-13T16:33:33,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-11-13T16:33:33,618 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-13T16:33:33,618 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515613618"}]},"ts":"1731515613618"} 2024-11-13T16:33:33,622 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-11-13T16:33:33,622 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-11-13T16:33:33,628 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-11-13T16:33:33,647 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=cd566cdf4cddf6c6aa17b382a0748fbc, UNASSIGN}, {pid=21, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b244849f410331d24fc6fa8bfa1098bf, UNASSIGN}] 2024-11-13T16:33:33,655 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=20, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=cd566cdf4cddf6c6aa17b382a0748fbc, UNASSIGN 2024-11-13T16:33:33,656 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b244849f410331d24fc6fa8bfa1098bf, UNASSIGN 2024-11-13T16:33:33,665 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=20 updating hbase:meta row=cd566cdf4cddf6c6aa17b382a0748fbc, regionState=CLOSING, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:33:33,667 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=21 updating hbase:meta row=b244849f410331d24fc6fa8bfa1098bf, regionState=CLOSING, regionLocation=f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:33:33,672 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=cd566cdf4cddf6c6aa17b382a0748fbc, UNASSIGN because future has completed 2024-11-13T16:33:33,675 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T16:33:33,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=21, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b244849f410331d24fc6fa8bfa1098bf, UNASSIGN because future has completed 2024-11-13T16:33:33,676 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=20, state=RUNNABLE, hasLock=false; CloseRegionProcedure 
cd566cdf4cddf6c6aa17b382a0748fbc, server=f8f85b0bb1d8,38523,1731515529292}] 2024-11-13T16:33:33,682 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T16:33:33,683 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=21, state=RUNNABLE, hasLock=false; CloseRegionProcedure b244849f410331d24fc6fa8bfa1098bf, server=f8f85b0bb1d8,39619,1731515529105}] 2024-11-13T16:33:33,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-13T16:33:33,837 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] handler.UnassignRegionHandler(122): Close cd566cdf4cddf6c6aa17b382a0748fbc 2024-11-13T16:33:33,837 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T16:33:33,838 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1722): Closing cd566cdf4cddf6c6aa17b382a0748fbc, disabling compactions & flushes 2024-11-13T16:33:33,838 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc. 2024-11-13T16:33:33,838 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc. 2024-11-13T16:33:33,838 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc. after waiting 0 ms 2024-11-13T16:33:33,838 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc. 2024-11-13T16:33:33,840 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close b244849f410331d24fc6fa8bfa1098bf 2024-11-13T16:33:33,840 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T16:33:33,841 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing b244849f410331d24fc6fa8bfa1098bf, disabling compactions & flushes 2024-11-13T16:33:33,841 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf. 2024-11-13T16:33:33,841 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf. 
2024-11-13T16:33:33,841 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf. after waiting 0 ms 2024-11-13T16:33:33,841 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf. 2024-11-13T16:33:33,854 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/b244849f410331d24fc6fa8bfa1098bf/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T16:33:33,855 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/cd566cdf4cddf6c6aa17b382a0748fbc/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T16:33:33,858 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:33:33,858 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:33:33,858 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf. 2024-11-13T16:33:33,858 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc. 
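Each region close above also stops the org.apache.hadoop.hbase.security.access.AccessController coprocessor, which this secure-export test presumably loads through configuration. A hedged sketch of how that coprocessor is commonly registered before a cluster starts follows; the property values shown are the usual ones and were not read from this run.

```java
// Sketch only, assuming the usual configuration keys: enable authorization and
// register the AccessController coprocessor that the close sequence above stops.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SecureClusterConf {             // placeholder class name
  static Configuration withAccessController() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.security.authorization", "true");
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    return conf;
  }
}
```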
2024-11-13T16:33:33,859 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1676): Region close journal for cd566cdf4cddf6c6aa17b382a0748fbc: Waiting for close lock at 1731515613838Running coprocessor pre-close hooks at 1731515613838Disabling compacts and flushes for region at 1731515613838Disabling writes for close at 1731515613838Writing region close event to WAL at 1731515613840 (+2 ms)Running coprocessor post-close hooks at 1731515613856 (+16 ms)Closed at 1731515613858 (+2 ms) 2024-11-13T16:33:33,859 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for b244849f410331d24fc6fa8bfa1098bf: Waiting for close lock at 1731515613841Running coprocessor pre-close hooks at 1731515613841Disabling compacts and flushes for region at 1731515613841Disabling writes for close at 1731515613841Writing region close event to WAL at 1731515613842 (+1 ms)Running coprocessor post-close hooks at 1731515613855 (+13 ms)Closed at 1731515613858 (+3 ms) 2024-11-13T16:33:33,862 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] handler.UnassignRegionHandler(157): Closed cd566cdf4cddf6c6aa17b382a0748fbc 2024-11-13T16:33:33,864 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=20 updating hbase:meta row=cd566cdf4cddf6c6aa17b382a0748fbc, regionState=CLOSED 2024-11-13T16:33:33,864 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed b244849f410331d24fc6fa8bfa1098bf 2024-11-13T16:33:33,866 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=21 updating hbase:meta row=b244849f410331d24fc6fa8bfa1098bf, regionState=CLOSED 2024-11-13T16:33:33,872 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=20, state=RUNNABLE, hasLock=false; CloseRegionProcedure cd566cdf4cddf6c6aa17b382a0748fbc, server=f8f85b0bb1d8,38523,1731515529292 because future has completed 2024-11-13T16:33:33,876 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=21, state=RUNNABLE, hasLock=false; CloseRegionProcedure b244849f410331d24fc6fa8bfa1098bf, server=f8f85b0bb1d8,39619,1731515529105 because future has completed 2024-11-13T16:33:33,881 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=20 2024-11-13T16:33:33,881 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=20, state=SUCCESS, hasLock=false; CloseRegionProcedure cd566cdf4cddf6c6aa17b382a0748fbc, server=f8f85b0bb1d8,38523,1731515529292 in 200 msec 2024-11-13T16:33:33,885 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=cd566cdf4cddf6c6aa17b382a0748fbc, UNASSIGN in 235 msec 2024-11-13T16:33:33,885 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=21 2024-11-13T16:33:33,885 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=21, state=SUCCESS, hasLock=false; CloseRegionProcedure b244849f410331d24fc6fa8bfa1098bf, server=f8f85b0bb1d8,39619,1731515529105 in 198 msec 2024-11-13T16:33:33,890 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): 
Finished subprocedure pid=21, resume processing ppid=19 2024-11-13T16:33:33,890 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, ppid=19, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b244849f410331d24fc6fa8bfa1098bf, UNASSIGN in 238 msec 2024-11-13T16:33:33,904 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-11-13T16:33:33,904 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 265 msec 2024-11-13T16:33:33,908 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515613907"}]},"ts":"1731515613907"} 2024-11-13T16:33:33,917 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-11-13T16:33:33,917 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-11-13T16:33:33,925 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 326 msec 2024-11-13T16:33:33,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-13T16:33:33,941 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-13T16:33:33,946 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportWithTargetName 2024-11-13T16:33:33,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=24, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-13T16:33:33,960 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=24, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-13T16:33:33,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-11-13T16:33:33,968 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=24, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-13T16:33:33,972 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38523 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-11-13T16:33:33,984 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/cd566cdf4cddf6c6aa17b382a0748fbc 2024-11-13T16:33:33,989 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/b244849f410331d24fc6fa8bfa1098bf 2024-11-13T16:33:33,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-13T16:33:33,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-13T16:33:33,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-13T16:33:33,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-13T16:33:33,998 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/cd566cdf4cddf6c6aa17b382a0748fbc/cf, FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/cd566cdf4cddf6c6aa17b382a0748fbc/recovered.edits] 2024-11-13T16:33:33,999 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-13T16:33:33,999 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-13T16:33:33,999 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-13T16:33:34,000 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-13T16:33:34,002 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/b244849f410331d24fc6fa8bfa1098bf/cf, FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/b244849f410331d24fc6fa8bfa1098bf/recovered.edits] 2024-11-13T16:33:34,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-13T16:33:34,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:33:34,014 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-13T16:33:34,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-13T16:33:34,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:33:34,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:33:34,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-13T16:33:34,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:33:34,019 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/b244849f410331d24fc6fa8bfa1098bf/cf/92cc9fa595064949859461ed8174e579 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportWithTargetName/b244849f410331d24fc6fa8bfa1098bf/cf/92cc9fa595064949859461ed8174e579 2024-11-13T16:33:34,020 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/cd566cdf4cddf6c6aa17b382a0748fbc/cf/e364eb1dbf7d451b8397e43d98fcc0cc to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportWithTargetName/cd566cdf4cddf6c6aa17b382a0748fbc/cf/e364eb1dbf7d451b8397e43d98fcc0cc 2024-11-13T16:33:34,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=24 2024-11-13T16:33:34,034 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/cd566cdf4cddf6c6aa17b382a0748fbc/recovered.edits/9.seqid to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportWithTargetName/cd566cdf4cddf6c6aa17b382a0748fbc/recovered.edits/9.seqid 2024-11-13T16:33:34,035 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/b244849f410331d24fc6fa8bfa1098bf/recovered.edits/9.seqid to 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportWithTargetName/b244849f410331d24fc6fa8bfa1098bf/recovered.edits/9.seqid 2024-11-13T16:33:34,038 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/b244849f410331d24fc6fa8bfa1098bf 2024-11-13T16:33:34,039 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithTargetName/cd566cdf4cddf6c6aa17b382a0748fbc 2024-11-13T16:33:34,039 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-11-13T16:33:34,041 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-11-13T16:33:34,047 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf] 2024-11-13T16:33:34,069 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202411138e8c528b971141cf8a1314cc90391f0e_b244849f410331d24fc6fa8bfa1098bf to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202411138e8c528b971141cf8a1314cc90391f0e_b244849f410331d24fc6fa8bfa1098bf 2024-11-13T16:33:34,077 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202411138a7f119c85324f0ea3bb4990bce0efbb_cd566cdf4cddf6c6aa17b382a0748fbc to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202411138a7f119c85324f0ea3bb4990bce0efbb_cd566cdf4cddf6c6aa17b382a0748fbc 2024-11-13T16:33:34,081 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-11-13T16:33:34,091 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=24, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-13T16:33:34,098 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39619 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-13T16:33:34,121 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-11-13T16:33:34,130 DEBUG 
[PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithTargetName' descriptor. 2024-11-13T16:33:34,136 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=24, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-13T16:33:34,137 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states. 2024-11-13T16:33:34,137 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731515614137"}]},"ts":"9223372036854775807"} 2024-11-13T16:33:34,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=24 2024-11-13T16:33:34,139 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731515614137"}]},"ts":"9223372036854775807"} 2024-11-13T16:33:34,164 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-13T16:33:34,164 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => cd566cdf4cddf6c6aa17b382a0748fbc, NAME => 'testtb-testExportWithTargetName,,1731515552308.cd566cdf4cddf6c6aa17b382a0748fbc.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => b244849f410331d24fc6fa8bfa1098bf, NAME => 'testtb-testExportWithTargetName,1,1731515552308.b244849f410331d24fc6fa8bfa1098bf.', STARTKEY => '1', ENDKEY => ''}] 2024-11-13T16:33:34,165 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted. 
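The procedures above (pid=18 DisableTableProcedure, pid=24 DeleteTableProcedure) and the snapshot deletions that follow correspond to the usual client-side teardown. A minimal sketch of those Admin calls, assuming a Connection is already open; the table and snapshot names match the log, but the surrounding test code is not shown here.

```java
// Sketch only: disable and delete the test table, then drop both snapshots,
// mirroring the DISABLE/DELETE operations and "delete name: ..." requests in the log.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class TeardownExample {
  static void cleanup(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportWithTargetName");
    try (Admin admin = conn.getAdmin()) {
      admin.disableTable(tn);   // triggers the DisableTableProcedure seen above
      admin.deleteTable(tn);    // triggers the DeleteTableProcedure seen above
      admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
      admin.deleteSnapshot("snaptb0-testExportWithTargetName");
    }
  }
}
```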
2024-11-13T16:33:34,165 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731515614165"}]},"ts":"9223372036854775807"} 2024-11-13T16:33:34,185 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-11-13T16:33:34,191 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=24, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-13T16:33:34,200 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 243 msec 2024-11-13T16:33:34,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=24 2024-11-13T16:33:34,349 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-11-13T16:33:34,350 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-13T16:33:34,382 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-11-13T16:33:34,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-11-13T16:33:34,391 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-11-13T16:33:34,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-11-13T16:33:34,443 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=754 (was 715) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-315877880_22 at /127.0.0.1:50366 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1170669712_1 at /127.0.0.1:46908 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'JobHistoryServer' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client 
(565235355) connection to localhost/127.0.0.1:37215 from jenkins.hfs.1 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (565235355) connection to localhost/127.0.0.1:37215 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-315877880_22 at /127.0.0.1:34656 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 83408) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39131 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (565235355) connection to localhost/127.0.0.1:35813 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Thread-1296 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) 
java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35813 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-315877880_22 at /127.0.0.1:46930 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1170669712_1 at /127.0.0.1:50348 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ForkJoinPool.commonPool-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=801 (was 769) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1998 (was 1381) - SystemLoadAverage LEAK? -, ProcessCount=16 (was 13) - ProcessCount LEAK? -, AvailableMemoryMB=1443 (was 4518)
2024-11-13T16:33:34,444 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=754 is superior to 500
2024-11-13T16:33:34,475 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=754, OpenFileDescriptor=801, MaxFileDescriptor=1048576, SystemLoadAverage=1998, ProcessCount=16, AvailableMemoryMB=1435
2024-11-13T16:33:34,475 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=754 is superior to 500
2024-11-13T16:33:34,478 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-13T16:33:34,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=25, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl
2024-11-13T16:33:34,492 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION
2024-11-13T16:33:34,494 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 25
2024-11-13T16:33:34,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25
2024-11-13T16:33:34,503 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-11-13T16:33:34,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741899_1075 (size=440)
2024-11-13T16:33:34,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741899_1075 (size=440)
2024-11-13T16:33:34,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741899_1075 (size=440)
2024-11-13T16:33:34,526 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b604a0fb59df25b4445300e838beaaad, NAME =>
'testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:33:34,526 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 3b9925ab6291465de82c43a01aa5ea9a, NAME => 'testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:33:34,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741900_1076 (size=65) 2024-11-13T16:33:34,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741900_1076 (size=65) 2024-11-13T16:33:34,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741900_1076 (size=65) 2024-11-13T16:33:34,555 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:33:34,555 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 3b9925ab6291465de82c43a01aa5ea9a, disabling compactions & flushes 2024-11-13T16:33:34,555 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a. 2024-11-13T16:33:34,555 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a. 2024-11-13T16:33:34,555 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a. 
after waiting 0 ms 2024-11-13T16:33:34,555 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a. 2024-11-13T16:33:34,555 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a. 2024-11-13T16:33:34,555 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 3b9925ab6291465de82c43a01aa5ea9a: Waiting for close lock at 1731515614555Disabling compacts and flushes for region at 1731515614555Disabling writes for close at 1731515614555Writing region close event to WAL at 1731515614555Closed at 1731515614555 2024-11-13T16:33:34,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741901_1077 (size=65) 2024-11-13T16:33:34,562 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:33:34,562 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing b604a0fb59df25b4445300e838beaaad, disabling compactions & flushes 2024-11-13T16:33:34,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741901_1077 (size=65) 2024-11-13T16:33:34,563 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad. 2024-11-13T16:33:34,563 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad. 2024-11-13T16:33:34,563 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad. after waiting 0 ms 2024-11-13T16:33:34,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741901_1077 (size=65) 2024-11-13T16:33:34,563 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad. 2024-11-13T16:33:34,563 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad. 
2024-11-13T16:33:34,563 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for b604a0fb59df25b4445300e838beaaad: Waiting for close lock at 1731515614562Disabling compacts and flushes for region at 1731515614562Disabling writes for close at 1731515614563 (+1 ms)Writing region close event to WAL at 1731515614563Closed at 1731515614563 2024-11-13T16:33:34,565 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T16:33:34,566 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731515614565"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515614565"}]},"ts":"1731515614565"} 2024-11-13T16:33:34,566 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731515614565"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515614565"}]},"ts":"1731515614565"} 2024-11-13T16:33:34,570 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-13T16:33:34,572 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T16:33:34,572 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515614572"}]},"ts":"1731515614572"} 2024-11-13T16:33:34,576 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-13T16:33:34,577 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {f8f85b0bb1d8=0} racks are {/default-rack=0} 2024-11-13T16:33:34,579 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T16:33:34,579 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T16:33:34,579 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T16:33:34,579 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T16:33:34,579 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T16:33:34,579 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T16:33:34,579 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T16:33:34,579 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T16:33:34,579 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T16:33:34,579 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T16:33:34,580 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b604a0fb59df25b4445300e838beaaad, ASSIGN}, {pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3b9925ab6291465de82c43a01aa5ea9a, ASSIGN}] 2024-11-13T16:33:34,582 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b604a0fb59df25b4445300e838beaaad, ASSIGN 2024-11-13T16:33:34,582 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3b9925ab6291465de82c43a01aa5ea9a, ASSIGN 2024-11-13T16:33:34,585 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b604a0fb59df25b4445300e838beaaad, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,39619,1731515529105; forceNewPlan=false, retain=false 2024-11-13T16:33:34,585 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3b9925ab6291465de82c43a01aa5ea9a, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,38437,1731515528789; forceNewPlan=false, retain=false 2024-11-13T16:33:34,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-13T16:33:34,736 INFO [f8f85b0bb1d8:45809 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-13T16:33:34,737 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=26 updating hbase:meta row=b604a0fb59df25b4445300e838beaaad, regionState=OPENING, regionLocation=f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:33:34,739 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=27 updating hbase:meta row=3b9925ab6291465de82c43a01aa5ea9a, regionState=OPENING, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:33:34,755 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b604a0fb59df25b4445300e838beaaad, ASSIGN because future has completed 2024-11-13T16:33:34,759 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3b9925ab6291465de82c43a01aa5ea9a, ASSIGN because future has completed 2024-11-13T16:33:34,759 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=28, ppid=26, state=RUNNABLE, hasLock=false; OpenRegionProcedure b604a0fb59df25b4445300e838beaaad, server=f8f85b0bb1d8,39619,1731515529105}] 2024-11-13T16:33:34,766 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=27, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3b9925ab6291465de82c43a01aa5ea9a, server=f8f85b0bb1d8,38437,1731515528789}] 2024-11-13T16:33:34,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-13T16:33:34,927 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T16:33:34,935 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad. 2024-11-13T16:33:34,935 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(7752): Opening region: {ENCODED => b604a0fb59df25b4445300e838beaaad, NAME => 'testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad.', STARTKEY => '', ENDKEY => '1'} 2024-11-13T16:33:34,935 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad. service=AccessControlService 2024-11-13T16:33:34,936 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-13T16:33:34,936 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49083, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T16:33:34,936 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl b604a0fb59df25b4445300e838beaaad 2024-11-13T16:33:34,936 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:33:34,936 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(7794): checking encryption for b604a0fb59df25b4445300e838beaaad 2024-11-13T16:33:34,936 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(7797): checking classloading for b604a0fb59df25b4445300e838beaaad 2024-11-13T16:33:34,948 INFO [StoreOpener-b604a0fb59df25b4445300e838beaaad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b604a0fb59df25b4445300e838beaaad 2024-11-13T16:33:34,959 INFO [StoreOpener-b604a0fb59df25b4445300e838beaaad-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b604a0fb59df25b4445300e838beaaad columnFamilyName cf 2024-11-13T16:33:34,960 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a. 2024-11-13T16:33:34,960 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7752): Opening region: {ENCODED => 3b9925ab6291465de82c43a01aa5ea9a, NAME => 'testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a.', STARTKEY => '1', ENDKEY => ''} 2024-11-13T16:33:34,960 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a. service=AccessControlService 2024-11-13T16:33:34,960 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-13T16:33:34,961 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 3b9925ab6291465de82c43a01aa5ea9a 2024-11-13T16:33:34,961 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:33:34,961 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7794): checking encryption for 3b9925ab6291465de82c43a01aa5ea9a 2024-11-13T16:33:34,961 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7797): checking classloading for 3b9925ab6291465de82c43a01aa5ea9a 2024-11-13T16:33:34,962 DEBUG [StoreOpener-b604a0fb59df25b4445300e838beaaad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:33:34,963 INFO [StoreOpener-b604a0fb59df25b4445300e838beaaad-1 {}] regionserver.HStore(327): Store=b604a0fb59df25b4445300e838beaaad/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:33:34,964 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1038): replaying wal for b604a0fb59df25b4445300e838beaaad 2024-11-13T16:33:34,965 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/b604a0fb59df25b4445300e838beaaad 2024-11-13T16:33:34,967 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/b604a0fb59df25b4445300e838beaaad 2024-11-13T16:33:34,968 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1048): stopping wal replay for b604a0fb59df25b4445300e838beaaad 2024-11-13T16:33:34,968 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1060): Cleaning up temporary data for b604a0fb59df25b4445300e838beaaad 2024-11-13T16:33:34,976 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1093): writing seq id for b604a0fb59df25b4445300e838beaaad 2024-11-13T16:33:34,982 INFO [StoreOpener-3b9925ab6291465de82c43a01aa5ea9a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3b9925ab6291465de82c43a01aa5ea9a 2024-11-13T16:33:34,989 INFO [StoreOpener-3b9925ab6291465de82c43a01aa5ea9a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 
MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3b9925ab6291465de82c43a01aa5ea9a columnFamilyName cf 2024-11-13T16:33:34,991 DEBUG [StoreOpener-3b9925ab6291465de82c43a01aa5ea9a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:33:34,992 INFO [StoreOpener-3b9925ab6291465de82c43a01aa5ea9a-1 {}] regionserver.HStore(327): Store=3b9925ab6291465de82c43a01aa5ea9a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:33:34,992 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1038): replaying wal for 3b9925ab6291465de82c43a01aa5ea9a 2024-11-13T16:33:34,994 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/3b9925ab6291465de82c43a01aa5ea9a 2024-11-13T16:33:34,995 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/3b9925ab6291465de82c43a01aa5ea9a 2024-11-13T16:33:34,996 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1048): stopping wal replay for 3b9925ab6291465de82c43a01aa5ea9a 2024-11-13T16:33:34,997 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1060): Cleaning up temporary data for 3b9925ab6291465de82c43a01aa5ea9a 2024-11-13T16:33:35,008 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/b604a0fb59df25b4445300e838beaaad/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:33:35,009 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1093): writing seq id for 3b9925ab6291465de82c43a01aa5ea9a 2024-11-13T16:33:35,009 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1114): Opened b604a0fb59df25b4445300e838beaaad; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71149887, jitterRate=0.06021593511104584}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:33:35,009 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, 
pid=28}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b604a0fb59df25b4445300e838beaaad 2024-11-13T16:33:35,010 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1006): Region open journal for b604a0fb59df25b4445300e838beaaad: Running coprocessor pre-open hook at 1731515614936Writing region info on filesystem at 1731515614936Initializing all the Stores at 1731515614943 (+7 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515614943Cleaning up temporary data from old regions at 1731515614970 (+27 ms)Running coprocessor post-open hooks at 1731515615009 (+39 ms)Region opened successfully at 1731515615010 (+1 ms) 2024-11-13T16:33:35,013 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad., pid=28, masterSystemTime=1731515614926 2024-11-13T16:33:35,021 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad. 2024-11-13T16:33:35,021 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad. 
2024-11-13T16:33:35,023 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=26 updating hbase:meta row=b604a0fb59df25b4445300e838beaaad, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:33:35,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=28, ppid=26, state=RUNNABLE, hasLock=false; OpenRegionProcedure b604a0fb59df25b4445300e838beaaad, server=f8f85b0bb1d8,39619,1731515529105 because future has completed 2024-11-13T16:33:35,030 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/3b9925ab6291465de82c43a01aa5ea9a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:33:35,033 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1114): Opened 3b9925ab6291465de82c43a01aa5ea9a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68405036, jitterRate=0.019314467906951904}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:33:35,033 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3b9925ab6291465de82c43a01aa5ea9a 2024-11-13T16:33:35,033 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1006): Region open journal for 3b9925ab6291465de82c43a01aa5ea9a: Running coprocessor pre-open hook at 1731515614961Writing region info on filesystem at 1731515614961Initializing all the Stores at 1731515614978 (+17 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515614978Cleaning up temporary data from old regions at 1731515614997 (+19 ms)Running coprocessor post-open hooks at 1731515615033 (+36 ms)Region opened successfully at 1731515615033 2024-11-13T16:33:35,040 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a., pid=29, masterSystemTime=1731515614926 2024-11-13T16:33:35,053 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=27 updating hbase:meta row=3b9925ab6291465de82c43a01aa5ea9a, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:33:35,054 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a. 2024-11-13T16:33:35,054 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a. 
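At this point both regions have been opened with openSeqNum=2 and hbase:meta carries their OPEN state and server locations. A minimal, illustrative way for a client to wait for that state and then inspect the resulting assignment (the helper name and the 60-second timeout are assumptions, not taken from this run) is:

```java
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

static void waitForAssignment(Connection conn, TableName tn) throws Exception {
  try (Admin admin = conn.getAdmin()) {
    // Poll until every region of the table is open somewhere (simple bounded wait).
    long deadline = System.currentTimeMillis() + 60_000;
    while (!admin.isTableAvailable(tn)) {
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException("table " + tn + " not available in time");
      }
      Thread.sleep(200);
    }
  }
  try (RegionLocator locator = conn.getRegionLocator(tn)) {
    // Prints each region's encoded name and the region server it was assigned to.
    for (HRegionLocation loc : locator.getAllRegionLocations()) {
      System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
    }
  }
}
```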
2024-11-13T16:33:35,060 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=28, resume processing ppid=26 2024-11-13T16:33:35,060 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, ppid=26, state=SUCCESS, hasLock=false; OpenRegionProcedure b604a0fb59df25b4445300e838beaaad, server=f8f85b0bb1d8,39619,1731515529105 in 284 msec 2024-11-13T16:33:35,068 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=29, ppid=27, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3b9925ab6291465de82c43a01aa5ea9a, server=f8f85b0bb1d8,38437,1731515528789 because future has completed 2024-11-13T16:33:35,095 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=25, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b604a0fb59df25b4445300e838beaaad, ASSIGN in 480 msec 2024-11-13T16:33:35,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-13T16:33:35,119 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=29, resume processing ppid=27 2024-11-13T16:33:35,119 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=27, state=SUCCESS, hasLock=false; OpenRegionProcedure 3b9925ab6291465de82c43a01aa5ea9a, server=f8f85b0bb1d8,38437,1731515528789 in 329 msec 2024-11-13T16:33:35,137 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=27, resume processing ppid=25 2024-11-13T16:33:35,138 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=25, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3b9925ab6291465de82c43a01aa5ea9a, ASSIGN in 540 msec 2024-11-13T16:33:35,141 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T16:33:35,142 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515615141"}]},"ts":"1731515615141"} 2024-11-13T16:33:35,158 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-13T16:33:35,161 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T16:33:35,162 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-11-13T16:33:35,173 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38523 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-13T16:33:35,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:33:35,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, 
quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:33:35,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:33:35,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:33:35,252 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T16:33:35,252 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T16:33:35,255 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T16:33:35,258 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T16:33:35,269 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 773 msec 2024-11-13T16:33:35,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-13T16:33:35,630 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-13T16:33:35,630 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T16:33:35,635 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-11-13T16:33:35,635 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad. 
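The CREATE_TABLE_POST_OPERATION step above has the AccessController write an RWXCA entry for the creating user to hbase:acl, and the ZKPermissionWatcher then fans the updated permissions out to every region server via the /hbase/acl znode. The test relies on that implicit creator grant; an explicit grant with the same effect could be issued from a client roughly as follows (a sketch only, with the user name parameterized):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

static void grantFullTableAccess(Connection conn, String user) throws Throwable {
  // Equivalent of the RWXCA entry logged above: READ, WRITE, EXEC, CREATE, ADMIN.
  AccessControlClient.grant(conn,
      TableName.valueOf("testtb-testExportWithResetTtl"),
      user,          // e.g. "jenkins" in this run
      null, null,    // null family/qualifier => permission applies to the whole table
      Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
      Permission.Action.CREATE, Permission.Action.ADMIN);
}
```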
2024-11-13T16:33:35,635 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T16:33:35,637 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T16:33:35,651 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T16:33:35,662 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:33:35,675 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50506, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:33:35,678 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T16:33:35,686 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-13T16:33:35,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731515615686 (current time:1731515615686). 2024-11-13T16:33:35,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T16:33:35,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-13T16:33:35,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T16:33:35,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9764ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:33:35,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:33:35,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:33:35,693 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:33:35,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:33:35,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: 
"189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:33:35,695 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a5c9f9f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:33:35,695 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:33:35,695 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:33:35,696 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:33:35,696 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60144, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:33:35,698 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7054e207, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:33:35,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:33:35,700 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:33:35,701 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:33:35,703 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59274, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:33:35,706 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809. 
2024-11-13T16:33:35,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:33:35,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:33:35,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:33:35,706 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:33:35,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32808312, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:33:35,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:33:35,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:33:35,709 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:33:35,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:33:35,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:33:35,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5545cce0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:33:35,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:33:35,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:33:35,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:33:35,711 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60154, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:33:35,715 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4cb4d01e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:33:35,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:33:35,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:33:35,719 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:33:35,720 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59280, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:33:35,723 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2] 2024-11-13T16:33:35,724 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:33:35,727 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38978, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:33:35,730 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809. 
2024-11-13T16:33:35,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:33:35,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:33:35,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:33:35,731 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:33:35,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-13T16:33:35,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
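The master has now validated the snapshot request { ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, including the ACL read shown in the call stack above, and is about to store the SnapshotProcedure. On the client side such a request corresponds to a FLUSH-type snapshot call; a minimal sketch, using nothing beyond what the request itself names, is:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

static void takeEmptySnapshot(Admin admin) throws Exception {
  // FLUSH-type snapshot of the (still empty) table; ttl=0 in the request means "use the default TTL".
  admin.snapshot(new SnapshotDescription(
      "emptySnaptb0-testExportWithResetTtl",
      TableName.valueOf("testtb-testExportWithResetTtl"),
      SnapshotType.FLUSH));
}
```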
2024-11-13T16:33:35,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=30, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-13T16:33:35,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 30 2024-11-13T16:33:35,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-13T16:33:35,744 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T16:33:35,748 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T16:33:35,758 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T16:33:35,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741902_1078 (size=161) 2024-11-13T16:33:35,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741902_1078 (size=161) 2024-11-13T16:33:35,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741902_1078 (size=161) 2024-11-13T16:33:35,791 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T16:33:35,792 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b604a0fb59df25b4445300e838beaaad}, {pid=32, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3b9925ab6291465de82c43a01aa5ea9a}] 2024-11-13T16:33:35,795 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=31, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b604a0fb59df25b4445300e838beaaad 2024-11-13T16:33:35,796 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=32, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3b9925ab6291465de82c43a01aa5ea9a 2024-11-13T16:33:35,848 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-13T16:33:35,904 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T16:33:35,951 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=31 2024-11-13T16:33:35,951 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38437 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=32 2024-11-13T16:33:35,955 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a. 2024-11-13T16:33:35,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.HRegion(2603): Flush status journal for 3b9925ab6291465de82c43a01aa5ea9a: 2024-11-13T16:33:35,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a. for emptySnaptb0-testExportWithResetTtl completed. 2024-11-13T16:33:35,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-13T16:33:35,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:33:35,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T16:33:35,959 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad. 2024-11-13T16:33:35,959 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.HRegion(2603): Flush status journal for b604a0fb59df25b4445300e838beaaad: 2024-11-13T16:33:35,960 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad. for emptySnaptb0-testExportWithResetTtl completed. 2024-11-13T16:33:35,960 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad.' 
region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-13T16:33:35,960 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:33:35,960 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T16:33:35,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741903_1079 (size=68) 2024-11-13T16:33:35,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741903_1079 (size=68) 2024-11-13T16:33:35,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741903_1079 (size=68) 2024-11-13T16:33:35,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741904_1080 (size=68) 2024-11-13T16:33:35,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741904_1080 (size=68) 2024-11-13T16:33:35,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741904_1080 (size=68) 2024-11-13T16:33:35,996 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad. 2024-11-13T16:33:35,996 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-13T16:33:35,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=31 2024-11-13T16:33:35,997 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region b604a0fb59df25b4445300e838beaaad 2024-11-13T16:33:35,997 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=31, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b604a0fb59df25b4445300e838beaaad 2024-11-13T16:33:36,000 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, ppid=30, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b604a0fb59df25b4445300e838beaaad in 207 msec 2024-11-13T16:33:36,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-13T16:33:36,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-13T16:33:36,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a. 
2024-11-13T16:33:36,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=32 2024-11-13T16:33:36,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=32 2024-11-13T16:33:36,386 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 3b9925ab6291465de82c43a01aa5ea9a 2024-11-13T16:33:36,386 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=32, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3b9925ab6291465de82c43a01aa5ea9a 2024-11-13T16:33:36,405 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T16:33:36,407 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=32, resume processing ppid=30 2024-11-13T16:33:36,408 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T16:33:36,410 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=30, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3b9925ab6291465de82c43a01aa5ea9a in 598 msec 2024-11-13T16:33:36,420 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-13T16:33:36,421 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T16:33:36,421 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:33:36,423 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-13T16:33:36,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741905_1081 (size=60) 2024-11-13T16:33:36,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741905_1081 (size=60) 2024-11-13T16:33:36,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741905_1081 (size=60) 2024-11-13T16:33:36,522 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T16:33:36,522 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-11-13T16:33:36,523 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-11-13T16:33:36,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741906_1082 (size=641) 2024-11-13T16:33:36,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741906_1082 (size=641) 2024-11-13T16:33:36,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741906_1082 (size=641) 2024-11-13T16:33:36,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-13T16:33:36,958 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T16:33:36,972 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T16:33:36,973 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-11-13T16:33:36,976 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T16:33:36,976 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 30 2024-11-13T16:33:36,991 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 1.2450 sec 2024-11-13T16:33:37,088 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-11-13T16:33:37,577 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0001_000001 (auth:SIMPLE) from 127.0.0.1:33002 2024-11-13T16:33:37,587 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-1_2/usercache/jenkins/appcache/application_1731515545832_0001/container_1731515545832_0001_01_000001/launch_container.sh] 2024-11-13T16:33:37,587 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-1_2/usercache/jenkins/appcache/application_1731515545832_0001/container_1731515545832_0001_01_000001/container_tokens] 2024-11-13T16:33:37,587 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-1_2/usercache/jenkins/appcache/application_1731515545832_0001/container_1731515545832_0001_01_000001/sysfs] 2024-11-13T16:33:37,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-13T16:33:37,713 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-13T16:33:37,715 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-13T16:33:37,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-13T16:33:37,918 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-13T16:33:37,952 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39619 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T16:33:37,959 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38437 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T16:33:37,968 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T16:33:37,975 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-11-13T16:33:37,975 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad. 2024-11-13T16:33:37,975 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T16:33:37,981 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T16:33:37,994 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T16:33:38,015 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T16:33:38,033 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-13T16:33:38,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731515618033 (current time:1731515618033). 
2024-11-13T16:33:38,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T16:33:38,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-13T16:33:38,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T16:33:38,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37d7cdee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:33:38,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:33:38,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:33:38,037 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:33:38,037 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:33:38,037 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:33:38,038 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@640dc967, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:33:38,038 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:33:38,039 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:33:38,039 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:33:38,041 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60174, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:33:38,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a0b9c44, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:33:38,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:33:38,044 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:33:38,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:33:38,048 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59288, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:33:38,050 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809. 2024-11-13T16:33:38,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:33:38,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:33:38,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:33:38,052 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-13T16:33:38,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4240e7ad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:33:38,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:33:38,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:33:38,057 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:33:38,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:33:38,058 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:33:38,058 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3118a51c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:33:38,058 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:33:38,058 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:33:38,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:33:38,062 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60194, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:33:38,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@736027da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:33:38,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:33:38,065 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:33:38,065 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:33:38,067 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59290, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-13T16:33:38,070 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2] 2024-11-13T16:33:38,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:33:38,076 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38990, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:33:38,079 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809. 2024-11-13T16:33:38,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:33:38,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:33:38,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:33:38,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-13T16:33:38,080 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:33:38,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-13T16:33:38,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=33, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-13T16:33:38,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 33 2024-11-13T16:33:38,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-13T16:33:38,091 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T16:33:38,093 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T16:33:38,101 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T16:33:38,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741907_1083 (size=156) 2024-11-13T16:33:38,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741907_1083 (size=156) 2024-11-13T16:33:38,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741907_1083 (size=156) 2024-11-13T16:33:38,164 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T16:33:38,164 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b604a0fb59df25b4445300e838beaaad}, {pid=35, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3b9925ab6291465de82c43a01aa5ea9a}] 2024-11-13T16:33:38,179 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b604a0fb59df25b4445300e838beaaad 2024-11-13T16:33:38,182 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=35, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3b9925ab6291465de82c43a01aa5ea9a 2024-11-13T16:33:38,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-13T16:33:38,339 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=34 2024-11-13T16:33:38,339 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38437 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=35 2024-11-13T16:33:38,341 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad. 2024-11-13T16:33:38,341 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegion(2902): Flushing b604a0fb59df25b4445300e838beaaad 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-13T16:33:38,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a. 
2024-11-13T16:33:38,364 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegion(2902): Flushing 3b9925ab6291465de82c43a01aa5ea9a 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-13T16:33:38,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-13T16:33:38,441 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241113a363e43c90c4437a96a617e2a4ad17a4_3b9925ab6291465de82c43a01aa5ea9a is 71, key is 10506bdabb08f17d9de6f5b427eed759/cf:q/1731515617958/Put/seqid=0 2024-11-13T16:33:38,455 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241113e30e2af8af404513ae1920d3efcdc46c_b604a0fb59df25b4445300e838beaaad is 71, key is 0b6eff74d7488a05691f093ffcde2590/cf:q/1731515617952/Put/seqid=0 2024-11-13T16:33:38,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741908_1084 (size=8101) 2024-11-13T16:33:38,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741908_1084 (size=8101) 2024-11-13T16:33:38,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741908_1084 (size=8101) 2024-11-13T16:33:38,487 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:33:38,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741909_1085 (size=5172) 2024-11-13T16:33:38,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741909_1085 (size=5172) 2024-11-13T16:33:38,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741909_1085 (size=5172) 2024-11-13T16:33:38,562 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:33:38,589 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241113a363e43c90c4437a96a617e2a4ad17a4_3b9925ab6291465de82c43a01aa5ea9a to 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241113a363e43c90c4437a96a617e2a4ad17a4_3b9925ab6291465de82c43a01aa5ea9a 2024-11-13T16:33:38,602 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/3b9925ab6291465de82c43a01aa5ea9a/.tmp/cf/0ff8f75385844983a431f8d60b7c45c8, store: [table=testtb-testExportWithResetTtl family=cf region=3b9925ab6291465de82c43a01aa5ea9a] 2024-11-13T16:33:38,603 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/3b9925ab6291465de82c43a01aa5ea9a/.tmp/cf/0ff8f75385844983a431f8d60b7c45c8 is 206, key is 18ad8bb98d77315b20f69722e9f959447/cf:q/1731515617958/Put/seqid=0 2024-11-13T16:33:38,604 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241113e30e2af8af404513ae1920d3efcdc46c_b604a0fb59df25b4445300e838beaaad to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241113e30e2af8af404513ae1920d3efcdc46c_b604a0fb59df25b4445300e838beaaad 2024-11-13T16:33:38,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/b604a0fb59df25b4445300e838beaaad/.tmp/cf/27945d21662447eda1891733ea50cc65, store: [table=testtb-testExportWithResetTtl family=cf region=b604a0fb59df25b4445300e838beaaad] 2024-11-13T16:33:38,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/b604a0fb59df25b4445300e838beaaad/.tmp/cf/27945d21662447eda1891733ea50cc65 is 206, key is 0699d01683890e1874a9b751e16ad6b37/cf:q/1731515617952/Put/seqid=0 2024-11-13T16:33:38,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-13T16:33:38,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741911_1087 (size=6108) 2024-11-13T16:33:38,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741911_1087 (size=6108) 2024-11-13T16:33:38,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741911_1087 (size=6108) 2024-11-13T16:33:38,759 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741910_1086 (size=14651) 2024-11-13T16:33:38,761 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/b604a0fb59df25b4445300e838beaaad/.tmp/cf/27945d21662447eda1891733ea50cc65 2024-11-13T16:33:38,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741910_1086 (size=14651) 2024-11-13T16:33:38,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741910_1086 (size=14651) 2024-11-13T16:33:38,775 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/3b9925ab6291465de82c43a01aa5ea9a/.tmp/cf/0ff8f75385844983a431f8d60b7c45c8 2024-11-13T16:33:38,794 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/3b9925ab6291465de82c43a01aa5ea9a/.tmp/cf/0ff8f75385844983a431f8d60b7c45c8 as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/3b9925ab6291465de82c43a01aa5ea9a/cf/0ff8f75385844983a431f8d60b7c45c8 2024-11-13T16:33:38,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/b604a0fb59df25b4445300e838beaaad/.tmp/cf/27945d21662447eda1891733ea50cc65 as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/b604a0fb59df25b4445300e838beaaad/cf/27945d21662447eda1891733ea50cc65 2024-11-13T16:33:38,828 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/3b9925ab6291465de82c43a01aa5ea9a/cf/0ff8f75385844983a431f8d60b7c45c8, entries=46, sequenceid=6, filesize=14.3 K 2024-11-13T16:33:38,837 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 3b9925ab6291465de82c43a01aa5ea9a in 473ms, sequenceid=6, compaction requested=false 2024-11-13T16:33:38,837 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegion(2603): Flush status journal for 3b9925ab6291465de82c43a01aa5ea9a: 2024-11-13T16:33:38,837 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a. for snaptb0-testExportWithResetTtl completed. 2024-11-13T16:33:38,838 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-13T16:33:38,838 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:33:38,838 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/3b9925ab6291465de82c43a01aa5ea9a/cf/0ff8f75385844983a431f8d60b7c45c8] hfiles 2024-11-13T16:33:38,838 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/3b9925ab6291465de82c43a01aa5ea9a/cf/0ff8f75385844983a431f8d60b7c45c8 for snapshot=snaptb0-testExportWithResetTtl 2024-11-13T16:33:38,846 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/b604a0fb59df25b4445300e838beaaad/cf/27945d21662447eda1891733ea50cc65, entries=4, sequenceid=6, filesize=6.0 K 2024-11-13T16:33:38,871 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for b604a0fb59df25b4445300e838beaaad in 530ms, sequenceid=6, compaction requested=false 2024-11-13T16:33:38,871 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegion(2603): Flush status journal for b604a0fb59df25b4445300e838beaaad: 2024-11-13T16:33:38,871 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad. for snaptb0-testExportWithResetTtl completed. 2024-11-13T16:33:38,872 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-13T16:33:38,872 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:33:38,872 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/b604a0fb59df25b4445300e838beaaad/cf/27945d21662447eda1891733ea50cc65] hfiles 2024-11-13T16:33:38,872 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/b604a0fb59df25b4445300e838beaaad/cf/27945d21662447eda1891733ea50cc65 for snapshot=snaptb0-testExportWithResetTtl 2024-11-13T16:33:38,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741912_1088 (size=107) 2024-11-13T16:33:38,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741912_1088 (size=107) 2024-11-13T16:33:38,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741912_1088 (size=107) 2024-11-13T16:33:38,973 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a. 
2024-11-13T16:33:38,974 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=35 2024-11-13T16:33:38,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=35 2024-11-13T16:33:38,977 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 3b9925ab6291465de82c43a01aa5ea9a 2024-11-13T16:33:38,978 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=35, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3b9925ab6291465de82c43a01aa5ea9a 2024-11-13T16:33:38,984 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=33, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3b9925ab6291465de82c43a01aa5ea9a in 816 msec 2024-11-13T16:33:39,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741913_1089 (size=107) 2024-11-13T16:33:39,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741913_1089 (size=107) 2024-11-13T16:33:39,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741913_1089 (size=107) 2024-11-13T16:33:39,054 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad. 
2024-11-13T16:33:39,054 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=34 2024-11-13T16:33:39,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=34 2024-11-13T16:33:39,056 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region b604a0fb59df25b4445300e838beaaad 2024-11-13T16:33:39,057 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b604a0fb59df25b4445300e838beaaad 2024-11-13T16:33:39,081 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T16:33:39,085 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=34, resume processing ppid=33 2024-11-13T16:33:39,087 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T16:33:39,088 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=33, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b604a0fb59df25b4445300e838beaaad in 899 msec 2024-11-13T16:33:39,097 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-13T16:33:39,098 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T16:33:39,098 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:33:39,114 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241113a363e43c90c4437a96a617e2a4ad17a4_3b9925ab6291465de82c43a01aa5ea9a, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241113e30e2af8af404513ae1920d3efcdc46c_b604a0fb59df25b4445300e838beaaad] hfiles 2024-11-13T16:33:39,114 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241113a363e43c90c4437a96a617e2a4ad17a4_3b9925ab6291465de82c43a01aa5ea9a 2024-11-13T16:33:39,115 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241113e30e2af8af404513ae1920d3efcdc46c_b604a0fb59df25b4445300e838beaaad 2024-11-13T16:33:39,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741914_1090 (size=291) 2024-11-13T16:33:39,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741914_1090 (size=291) 2024-11-13T16:33:39,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741914_1090 (size=291) 2024-11-13T16:33:39,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-13T16:33:39,242 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T16:33:39,242 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-11-13T16:33:39,249 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-11-13T16:33:39,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741915_1091 (size=951) 2024-11-13T16:33:39,372 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741915_1091 (size=951) 2024-11-13T16:33:39,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741915_1091 (size=951) 2024-11-13T16:33:39,409 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T16:33:39,467 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T16:33:39,472 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-11-13T16:33:39,484 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T16:33:39,484 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 33 2024-11-13T16:33:39,499 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 1.4040 sec 2024-11-13T16:33:39,545 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T16:33:40,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-13T16:33:40,262 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-13T16:33:40,267 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T16:33:40,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] 
procedure2.ProcedureExecutor(1139): Stored pid=36, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-11-13T16:33:40,276 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T16:33:40,276 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 36 2024-11-13T16:33:40,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-13T16:33:40,282 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T16:33:40,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741916_1092 (size=433) 2024-11-13T16:33:40,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741916_1092 (size=433) 2024-11-13T16:33:40,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741916_1092 (size=433) 2024-11-13T16:33:40,389 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 55e47a9f1f061450128f3dc9bb282520, NAME => 'testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:33:40,394 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => e74f391d9aede80a8efc10a7994ab330, NAME => 'testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:33:40,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-13T16:33:40,545 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741918_1094 (size=58) 2024-11-13T16:33:40,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741918_1094 (size=58) 2024-11-13T16:33:40,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741918_1094 (size=58) 2024-11-13T16:33:40,556 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:33:40,557 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing e74f391d9aede80a8efc10a7994ab330, disabling compactions & flushes 2024-11-13T16:33:40,557 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330. 2024-11-13T16:33:40,557 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330. 2024-11-13T16:33:40,557 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330. after waiting 0 ms 2024-11-13T16:33:40,557 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330. 2024-11-13T16:33:40,557 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330. 
2024-11-13T16:33:40,557 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for e74f391d9aede80a8efc10a7994ab330: Waiting for close lock at 1731515620556Disabling compacts and flushes for region at 1731515620556Disabling writes for close at 1731515620557 (+1 ms)Writing region close event to WAL at 1731515620557Closed at 1731515620557 2024-11-13T16:33:40,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741917_1093 (size=58) 2024-11-13T16:33:40,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741917_1093 (size=58) 2024-11-13T16:33:40,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741917_1093 (size=58) 2024-11-13T16:33:40,605 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:33:40,606 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 55e47a9f1f061450128f3dc9bb282520, disabling compactions & flushes 2024-11-13T16:33:40,606 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520. 2024-11-13T16:33:40,606 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520. 2024-11-13T16:33:40,606 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520. after waiting 0 ms 2024-11-13T16:33:40,606 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520. 2024-11-13T16:33:40,606 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520. 
2024-11-13T16:33:40,606 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 55e47a9f1f061450128f3dc9bb282520: Waiting for close lock at 1731515620606Disabling compacts and flushes for region at 1731515620606Disabling writes for close at 1731515620606Writing region close event to WAL at 1731515620606Closed at 1731515620606 2024-11-13T16:33:40,612 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T16:33:40,613 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1731515620612"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515620612"}]},"ts":"1731515620612"} 2024-11-13T16:33:40,613 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1731515620612"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515620612"}]},"ts":"1731515620612"} 2024-11-13T16:33:40,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-13T16:33:40,628 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-13T16:33:40,634 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T16:33:40,634 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515620634"}]},"ts":"1731515620634"} 2024-11-13T16:33:40,645 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-13T16:33:40,645 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {f8f85b0bb1d8=0} racks are {/default-rack=0} 2024-11-13T16:33:40,653 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T16:33:40,653 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T16:33:40,653 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T16:33:40,653 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T16:33:40,653 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T16:33:40,653 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T16:33:40,654 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T16:33:40,654 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T16:33:40,654 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T16:33:40,654 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T16:33:40,654 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=55e47a9f1f061450128f3dc9bb282520, ASSIGN}, {pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=e74f391d9aede80a8efc10a7994ab330, ASSIGN}] 2024-11-13T16:33:40,664 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=55e47a9f1f061450128f3dc9bb282520, ASSIGN 2024-11-13T16:33:40,666 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=e74f391d9aede80a8efc10a7994ab330, ASSIGN 2024-11-13T16:33:40,667 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=55e47a9f1f061450128f3dc9bb282520, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,38523,1731515529292; forceNewPlan=false, retain=false 2024-11-13T16:33:40,675 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=e74f391d9aede80a8efc10a7994ab330, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,39619,1731515529105; forceNewPlan=false, retain=false 2024-11-13T16:33:40,818 INFO [f8f85b0bb1d8:45809 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-13T16:33:40,819 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=38 updating hbase:meta row=e74f391d9aede80a8efc10a7994ab330, regionState=OPENING, regionLocation=f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:33:40,820 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=37 updating hbase:meta row=55e47a9f1f061450128f3dc9bb282520, regionState=OPENING, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:33:40,831 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=e74f391d9aede80a8efc10a7994ab330, ASSIGN because future has completed 2024-11-13T16:33:40,833 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE, hasLock=false; OpenRegionProcedure e74f391d9aede80a8efc10a7994ab330, server=f8f85b0bb1d8,39619,1731515529105}] 2024-11-13T16:33:40,835 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=55e47a9f1f061450128f3dc9bb282520, ASSIGN because future has completed 2024-11-13T16:33:40,840 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=37, state=RUNNABLE, hasLock=false; OpenRegionProcedure 55e47a9f1f061450128f3dc9bb282520, server=f8f85b0bb1d8,38523,1731515529292}] 2024-11-13T16:33:40,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-13T16:33:41,012 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330. 2024-11-13T16:33:41,013 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7752): Opening region: {ENCODED => e74f391d9aede80a8efc10a7994ab330, NAME => 'testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330.', STARTKEY => '1', ENDKEY => ''} 2024-11-13T16:33:41,013 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330. service=AccessControlService 2024-11-13T16:33:41,014 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-13T16:33:41,014 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl e74f391d9aede80a8efc10a7994ab330 2024-11-13T16:33:41,014 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:33:41,014 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7794): checking encryption for e74f391d9aede80a8efc10a7994ab330 2024-11-13T16:33:41,016 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7797): checking classloading for e74f391d9aede80a8efc10a7994ab330 2024-11-13T16:33:41,024 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520. 2024-11-13T16:33:41,024 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7752): Opening region: {ENCODED => 55e47a9f1f061450128f3dc9bb282520, NAME => 'testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520.', STARTKEY => '', ENDKEY => '1'} 2024-11-13T16:33:41,025 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520. service=AccessControlService 2024-11-13T16:33:41,025 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-13T16:33:41,025 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 55e47a9f1f061450128f3dc9bb282520 2024-11-13T16:33:41,025 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:33:41,025 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7794): checking encryption for 55e47a9f1f061450128f3dc9bb282520 2024-11-13T16:33:41,025 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7797): checking classloading for 55e47a9f1f061450128f3dc9bb282520 2024-11-13T16:33:41,038 INFO [StoreOpener-e74f391d9aede80a8efc10a7994ab330-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e74f391d9aede80a8efc10a7994ab330 2024-11-13T16:33:41,056 INFO [StoreOpener-e74f391d9aede80a8efc10a7994ab330-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e74f391d9aede80a8efc10a7994ab330 columnFamilyName cf 2024-11-13T16:33:41,059 DEBUG [StoreOpener-e74f391d9aede80a8efc10a7994ab330-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:33:41,059 INFO [StoreOpener-55e47a9f1f061450128f3dc9bb282520-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 55e47a9f1f061450128f3dc9bb282520 2024-11-13T16:33:41,060 INFO [StoreOpener-e74f391d9aede80a8efc10a7994ab330-1 {}] regionserver.HStore(327): Store=e74f391d9aede80a8efc10a7994ab330/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:33:41,061 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1038): replaying wal for e74f391d9aede80a8efc10a7994ab330 2024-11-13T16:33:41,062 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/e74f391d9aede80a8efc10a7994ab330 2024-11-13T16:33:41,064 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/e74f391d9aede80a8efc10a7994ab330 2024-11-13T16:33:41,064 INFO [StoreOpener-55e47a9f1f061450128f3dc9bb282520-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 55e47a9f1f061450128f3dc9bb282520 columnFamilyName cf 2024-11-13T16:33:41,064 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1048): stopping wal replay for e74f391d9aede80a8efc10a7994ab330 2024-11-13T16:33:41,064 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1060): Cleaning up temporary data for e74f391d9aede80a8efc10a7994ab330 2024-11-13T16:33:41,067 DEBUG [StoreOpener-55e47a9f1f061450128f3dc9bb282520-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:33:41,073 INFO [StoreOpener-55e47a9f1f061450128f3dc9bb282520-1 {}] regionserver.HStore(327): Store=55e47a9f1f061450128f3dc9bb282520/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:33:41,074 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1093): writing seq id for e74f391d9aede80a8efc10a7994ab330 2024-11-13T16:33:41,076 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1038): replaying wal for 55e47a9f1f061450128f3dc9bb282520 2024-11-13T16:33:41,084 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/55e47a9f1f061450128f3dc9bb282520 2024-11-13T16:33:41,090 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/55e47a9f1f061450128f3dc9bb282520 2024-11-13T16:33:41,092 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1048): stopping wal replay for 55e47a9f1f061450128f3dc9bb282520 2024-11-13T16:33:41,092 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 
{event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1060): Cleaning up temporary data for 55e47a9f1f061450128f3dc9bb282520 2024-11-13T16:33:41,093 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/e74f391d9aede80a8efc10a7994ab330/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:33:41,094 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1114): Opened e74f391d9aede80a8efc10a7994ab330; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62205685, jitterRate=-0.07306306064128876}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:33:41,094 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e74f391d9aede80a8efc10a7994ab330 2024-11-13T16:33:41,095 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1006): Region open journal for e74f391d9aede80a8efc10a7994ab330: Running coprocessor pre-open hook at 1731515621016Writing region info on filesystem at 1731515621016Initializing all the Stores at 1731515621026 (+10 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515621026Cleaning up temporary data from old regions at 1731515621064 (+38 ms)Running coprocessor post-open hooks at 1731515621094 (+30 ms)Region opened successfully at 1731515621095 (+1 ms) 2024-11-13T16:33:41,096 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330., pid=39, masterSystemTime=1731515620997 2024-11-13T16:33:41,106 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330. 2024-11-13T16:33:41,106 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330. 
2024-11-13T16:33:41,112 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1093): writing seq id for 55e47a9f1f061450128f3dc9bb282520 2024-11-13T16:33:41,119 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=38 updating hbase:meta row=e74f391d9aede80a8efc10a7994ab330, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:33:41,123 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=39, ppid=38, state=RUNNABLE, hasLock=false; OpenRegionProcedure e74f391d9aede80a8efc10a7994ab330, server=f8f85b0bb1d8,39619,1731515529105 because future has completed 2024-11-13T16:33:41,129 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45809 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=f8f85b0bb1d8,39619,1731515529105, table=testExportWithResetTtl, region=e74f391d9aede80a8efc10a7994ab330. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-13T16:33:41,132 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/55e47a9f1f061450128f3dc9bb282520/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:33:41,134 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1114): Opened 55e47a9f1f061450128f3dc9bb282520; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64990542, jitterRate=-0.031565457582473755}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:33:41,134 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 55e47a9f1f061450128f3dc9bb282520 2024-11-13T16:33:41,134 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1006): Region open journal for 55e47a9f1f061450128f3dc9bb282520: Running coprocessor pre-open hook at 1731515621025Writing region info on filesystem at 1731515621026 (+1 ms)Initializing all the Stores at 1731515621035 (+9 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515621035Cleaning up temporary data from old regions at 1731515621092 (+57 ms)Running coprocessor post-open hooks at 1731515621134 (+42 ms)Region opened successfully at 1731515621134 2024-11-13T16:33:41,137 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=38 2024-11-13T16:33:41,139 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; OpenRegionProcedure e74f391d9aede80a8efc10a7994ab330, server=f8f85b0bb1d8,39619,1731515529105 in 296 msec 2024-11-13T16:33:41,140 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 
{event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520., pid=40, masterSystemTime=1731515621000 2024-11-13T16:33:41,141 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, ppid=36, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=e74f391d9aede80a8efc10a7994ab330, ASSIGN in 482 msec 2024-11-13T16:33:41,146 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520. 2024-11-13T16:33:41,147 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520. 2024-11-13T16:33:41,151 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=37 updating hbase:meta row=55e47a9f1f061450128f3dc9bb282520, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:33:41,158 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=37, state=RUNNABLE, hasLock=false; OpenRegionProcedure 55e47a9f1f061450128f3dc9bb282520, server=f8f85b0bb1d8,38523,1731515529292 because future has completed 2024-11-13T16:33:41,198 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=37, resume processing ppid=36 2024-11-13T16:33:41,198 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, ppid=36, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=55e47a9f1f061450128f3dc9bb282520, ASSIGN in 535 msec 2024-11-13T16:33:41,201 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=40, resume processing ppid=37 2024-11-13T16:33:41,201 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=37, state=SUCCESS, hasLock=false; OpenRegionProcedure 55e47a9f1f061450128f3dc9bb282520, server=f8f85b0bb1d8,38523,1731515529292 in 331 msec 2024-11-13T16:33:41,201 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T16:33:41,201 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515621201"}]},"ts":"1731515621201"} 2024-11-13T16:33:41,205 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-13T16:33:41,211 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T16:33:41,212 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-11-13T16:33:41,252 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38523 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-13T16:33:41,268 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:33:41,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:33:41,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:33:41,274 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:33:41,279 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T16:33:41,288 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T16:33:41,288 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T16:33:41,289 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T16:33:41,289 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T16:33:41,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-13T16:33:41,294 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T16:33:41,299 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 1.0260 sec 2024-11-13T16:33:41,301 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T16:33:41,302 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache 
from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T16:33:41,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-13T16:33:41,442 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 2024-11-13T16:33:41,443 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T16:33:41,453 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-11-13T16:33:41,453 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520. 2024-11-13T16:33:41,453 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T16:33:41,460 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T16:33:41,478 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T16:33:41,500 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T16:33:41,547 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38523 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T16:33:41,567 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39619 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T16:33:41,572 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T16:33:41,579 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-11-13T16:33:41,579 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520. 
2024-11-13T16:33:41,580 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T16:33:41,590 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T16:33:41,604 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T16:33:41,636 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-13T16:33:41,649 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-13T16:33:41,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731515621649 (current time:1731515621649). 2024-11-13T16:33:41,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-13T16:33:41,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T16:33:41,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c84754b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:33:41,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:33:41,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:33:41,662 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:33:41,664 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:33:41,664 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:33:41,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22dd140f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:33:41,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:33:41,665 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:33:41,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:33:41,668 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60218, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:33:41,670 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@181dbae5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:33:41,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:33:41,673 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:33:41,674 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:33:41,682 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59294, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:33:41,691 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809. 
2024-11-13T16:33:41,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:33:41,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:33:41,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:33:41,692 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:33:41,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10693299, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:33:41,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:33:41,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:33:41,700 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:33:41,701 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:33:41,701 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:33:41,702 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31d53828, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:33:41,702 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:33:41,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:33:41,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:33:41,705 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60246, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:33:41,706 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5aa6ede2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:33:41,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:33:41,713 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:33:41,713 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:33:41,719 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59296, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:33:41,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2] 2024-11-13T16:33:41,723 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:33:41,726 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38996, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:33:41,731 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809. 
2024-11-13T16:33:41,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:33:41,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:33:41,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:33:41,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-13T16:33:41,735 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:33:41,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-13T16:33:41,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=41, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-13T16:33:41,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 41 2024-11-13T16:33:41,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-13T16:33:41,757 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-11-13T16:33:41,759 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T16:33:41,769 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T16:33:41,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741919_1095 (size=143) 2024-11-13T16:33:41,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-13T16:33:41,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741919_1095 (size=143) 2024-11-13T16:33:41,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741919_1095 (size=143) 2024-11-13T16:33:41,883 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T16:33:41,884 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 55e47a9f1f061450128f3dc9bb282520}, {pid=43, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e74f391d9aede80a8efc10a7994ab330}] 2024-11-13T16:33:41,894 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=43, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e74f391d9aede80a8efc10a7994ab330 2024-11-13T16:33:41,895 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=42, ppid=41, state=RUNNABLE, hasLock=false; 
SnapshotRegionProcedure 55e47a9f1f061450128f3dc9bb282520 2024-11-13T16:33:42,055 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=43 2024-11-13T16:33:42,057 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38523 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=42 2024-11-13T16:33:42,058 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330. 2024-11-13T16:33:42,058 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegion(2902): Flushing e74f391d9aede80a8efc10a7994ab330 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-13T16:33:42,064 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520. 2024-11-13T16:33:42,065 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegion(2902): Flushing 55e47a9f1f061450128f3dc9bb282520 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-13T16:33:42,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-13T16:33:42,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241113d9ebe5f6f5a7467185438af9d62000de_e74f391d9aede80a8efc10a7994ab330 is 71, key is 17703cffbe99ab4f47121a57528b480b/cf:q/1731515621566/Put/seqid=0 2024-11-13T16:33:42,190 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241113c6ebba44b7b645b89af1c3673d28b6fd_55e47a9f1f061450128f3dc9bb282520 is 71, key is 075bd1caf77678b97a6156f9c43b3905/cf:q/1731515621547/Put/seqid=0 2024-11-13T16:33:42,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741920_1096 (size=8172) 2024-11-13T16:33:42,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741920_1096 (size=8172) 2024-11-13T16:33:42,265 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:33:42,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741921_1097 (size=5102) 2024-11-13T16:33:42,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741920_1096 (size=8172) 2024-11-13T16:33:42,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741921_1097 (size=5102) 2024-11-13T16:33:42,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741921_1097 (size=5102) 2024-11-13T16:33:42,300 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241113d9ebe5f6f5a7467185438af9d62000de_e74f391d9aede80a8efc10a7994ab330 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241113d9ebe5f6f5a7467185438af9d62000de_e74f391d9aede80a8efc10a7994ab330 2024-11-13T16:33:42,310 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/e74f391d9aede80a8efc10a7994ab330/.tmp/cf/8f8939b5e0c448829c2f729aae82e8b6, store: [table=testExportWithResetTtl family=cf region=e74f391d9aede80a8efc10a7994ab330] 2024-11-13T16:33:42,311 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/e74f391d9aede80a8efc10a7994ab330/.tmp/cf/8f8939b5e0c448829c2f729aae82e8b6 is 199, key is 133a8422d8aca48f2685e880657e3ec92/cf:q/1731515621566/Put/seqid=0 2024-11-13T16:33:42,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741922_1098 (size=14519) 2024-11-13T16:33:42,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741922_1098 (size=14519) 2024-11-13T16:33:42,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741922_1098 (size=14519) 2024-11-13T16:33:42,380 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/e74f391d9aede80a8efc10a7994ab330/.tmp/cf/8f8939b5e0c448829c2f729aae82e8b6 2024-11-13T16:33:42,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-13T16:33:42,455 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/e74f391d9aede80a8efc10a7994ab330/.tmp/cf/8f8939b5e0c448829c2f729aae82e8b6 as 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/e74f391d9aede80a8efc10a7994ab330/cf/8f8939b5e0c448829c2f729aae82e8b6 2024-11-13T16:33:42,497 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/e74f391d9aede80a8efc10a7994ab330/cf/8f8939b5e0c448829c2f729aae82e8b6, entries=47, sequenceid=5, filesize=14.2 K 2024-11-13T16:33:42,505 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for e74f391d9aede80a8efc10a7994ab330 in 447ms, sequenceid=5, compaction requested=false 2024-11-13T16:33:42,505 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-11-13T16:33:42,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegion(2603): Flush status journal for e74f391d9aede80a8efc10a7994ab330: 2024-11-13T16:33:42,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330. for snaptb-testExportWithResetTtl completed. 2024-11-13T16:33:42,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-13T16:33:42,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:33:42,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/e74f391d9aede80a8efc10a7994ab330/cf/8f8939b5e0c448829c2f729aae82e8b6] hfiles 2024-11-13T16:33:42,508 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/e74f391d9aede80a8efc10a7994ab330/cf/8f8939b5e0c448829c2f729aae82e8b6 for snapshot=snaptb-testExportWithResetTtl 2024-11-13T16:33:42,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741923_1099 (size=100) 2024-11-13T16:33:42,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741923_1099 (size=100) 2024-11-13T16:33:42,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741923_1099 (size=100) 2024-11-13T16:33:42,579 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330. 
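With the region's store file now referenced in the manifest, the snapshot's layout can be inspected offline. A sketch using the SnapshotInfo tool is below; the option names are the commonly documented ones and should be treated as an assumption, since this run does not exercise SnapshotInfo.

    // Hedged sketch: listing the files and stats recorded for the snapshot.
    // Option names ("-snapshot", "-files", "-stats") are assumptions; verify
    // against the SnapshotInfo usage text for the HBase version in use.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.SnapshotInfo;
    import org.apache.hadoop.util.ToolRunner;

    public class SnapshotInfoSketch {
      public static void main(String[] args) throws Exception {
        int rc = ToolRunner.run(HBaseConfiguration.create(), new SnapshotInfo(),
            new String[] {"-snapshot", "snaptb-testExportWithResetTtl", "-files", "-stats"});
        System.exit(rc);
      }
    }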
2024-11-13T16:33:42,579 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=43 2024-11-13T16:33:42,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=43 2024-11-13T16:33:42,582 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region e74f391d9aede80a8efc10a7994ab330 2024-11-13T16:33:42,583 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=43, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e74f391d9aede80a8efc10a7994ab330 2024-11-13T16:33:42,595 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=41, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e74f391d9aede80a8efc10a7994ab330 in 705 msec 2024-11-13T16:33:42,670 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:33:42,695 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241113c6ebba44b7b645b89af1c3673d28b6fd_55e47a9f1f061450128f3dc9bb282520 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241113c6ebba44b7b645b89af1c3673d28b6fd_55e47a9f1f061450128f3dc9bb282520 2024-11-13T16:33:42,701 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/55e47a9f1f061450128f3dc9bb282520/.tmp/cf/3f1d943c22244e60b17839002e876f02, store: [table=testExportWithResetTtl family=cf region=55e47a9f1f061450128f3dc9bb282520] 2024-11-13T16:33:42,702 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/55e47a9f1f061450128f3dc9bb282520/.tmp/cf/3f1d943c22244e60b17839002e876f02 is 199, key is 0a206da48734e9ca9b3a83a9aac94ef90/cf:q/1731515621547/Put/seqid=0 2024-11-13T16:33:42,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741924_1100 (size=5878) 2024-11-13T16:33:42,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741924_1100 (size=5878) 2024-11-13T16:33:42,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741924_1100 (size=5878) 2024-11-13T16:33:42,742 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] mob.DefaultMobStoreFlusher(147): Mob store is 
flushed, sequenceid=5, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/55e47a9f1f061450128f3dc9bb282520/.tmp/cf/3f1d943c22244e60b17839002e876f02 2024-11-13T16:33:42,795 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/55e47a9f1f061450128f3dc9bb282520/.tmp/cf/3f1d943c22244e60b17839002e876f02 as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/55e47a9f1f061450128f3dc9bb282520/cf/3f1d943c22244e60b17839002e876f02 2024-11-13T16:33:42,836 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/55e47a9f1f061450128f3dc9bb282520/cf/3f1d943c22244e60b17839002e876f02, entries=3, sequenceid=5, filesize=5.7 K 2024-11-13T16:33:42,841 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 55e47a9f1f061450128f3dc9bb282520 in 776ms, sequenceid=5, compaction requested=false 2024-11-13T16:33:42,841 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegion(2603): Flush status journal for 55e47a9f1f061450128f3dc9bb282520: 2024-11-13T16:33:42,841 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520. for snaptb-testExportWithResetTtl completed. 2024-11-13T16:33:42,842 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-13T16:33:42,842 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:33:42,842 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/55e47a9f1f061450128f3dc9bb282520/cf/3f1d943c22244e60b17839002e876f02] hfiles 2024-11-13T16:33:42,842 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/55e47a9f1f061450128f3dc9bb282520/cf/3f1d943c22244e60b17839002e876f02 for snapshot=snaptb-testExportWithResetTtl 2024-11-13T16:33:42,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741925_1101 (size=100) 2024-11-13T16:33:42,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741925_1101 (size=100) 2024-11-13T16:33:42,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741925_1101 (size=100) 2024-11-13T16:33:42,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-13T16:33:42,929 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520. 
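Both regions are now captured, and the log below (the ExportSnapshot entries from 16:33:44 onward) copies the snapshot manifest and hfiles to a second HDFS location. A sketch of an equivalent export invocation follows; the destination path is illustrative, and this is not the code path TestExportSnapshot actually drives.

    // Hedged sketch: exporting the snapshot to another HDFS directory.
    // "-snapshot", "-copy-to" and "-mappers" are standard ExportSnapshot
    // options; the target path below is an assumption for illustration.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(),
            new String[] {
                "-snapshot", "snaptb-testExportWithResetTtl",
                "-copy-to", "hdfs://localhost:37215/user/jenkins/export-target",
                "-mappers", "1"
            });
        System.exit(rc);
      }
    }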
2024-11-13T16:33:42,929 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=42 2024-11-13T16:33:42,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=42 2024-11-13T16:33:42,931 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 55e47a9f1f061450128f3dc9bb282520 2024-11-13T16:33:42,932 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=42, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 55e47a9f1f061450128f3dc9bb282520 2024-11-13T16:33:42,951 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=41 2024-11-13T16:33:42,951 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T16:33:42,951 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=41, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 55e47a9f1f061450128f3dc9bb282520 in 1.0550 sec 2024-11-13T16:33:42,957 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T16:33:42,971 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
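The separate MOB region being manifested here exists because the table's cf family stores its qualifying cells as MOB files; that is also why the flushes above wrote files under the mobdir tree via DefaultMobStoreFlusher. A sketch of the kind of column-family definition that produces this layout follows; the threshold value is an assumption, not read from the test.

    // Hedged sketch: a MOB-enabled column family. With MOB on, qualifying cells
    // are written to files under the table's mobdir, and snapshots carry an
    // extra MOB "region" whose manifest references those files.
    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobTableSketch {
      static void createMobTable(Admin admin) throws IOException {
        admin.createTable(TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testExportWithResetTtl"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("cf"))
                .setMobEnabled(true)   // store qualifying cells as MOB files
                .setMobThreshold(0L)   // assumed threshold so all test cells qualify
                .build())
            .build());
      }
    }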
2024-11-13T16:33:42,971 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T16:33:42,971 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:33:42,987 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241113d9ebe5f6f5a7467185438af9d62000de_e74f391d9aede80a8efc10a7994ab330, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241113c6ebba44b7b645b89af1c3673d28b6fd_55e47a9f1f061450128f3dc9bb282520] hfiles 2024-11-13T16:33:42,987 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241113d9ebe5f6f5a7467185438af9d62000de_e74f391d9aede80a8efc10a7994ab330 2024-11-13T16:33:42,987 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241113c6ebba44b7b645b89af1c3673d28b6fd_55e47a9f1f061450128f3dc9bb282520 2024-11-13T16:33:43,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741926_1102 (size=284) 2024-11-13T16:33:43,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741926_1102 (size=284) 2024-11-13T16:33:43,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741926_1102 (size=284) 2024-11-13T16:33:43,078 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T16:33:43,079 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-11-13T16:33:43,084 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-13T16:33:43,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741927_1103 (size=923) 2024-11-13T16:33:43,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741927_1103 (size=923) 2024-11-13T16:33:43,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:43257 is added to blk_1073741927_1103 (size=923) 2024-11-13T16:33:43,211 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T16:33:43,242 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T16:33:43,247 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-13T16:33:43,253 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T16:33:43,253 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 41 2024-11-13T16:33:43,258 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 1.5130 sec 2024-11-13T16:33:43,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-13T16:33:43,946 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-11-13T16:33:44,008 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515624007 2024-11-13T16:33:44,008 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:37215, tgtDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515624007, rawTgtDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515624007, srcFsUri=hdfs://localhost:37215, srcDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:33:44,060 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:37215, inputRoot=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:33:44,060 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-315877880_22, ugi=jenkins (auth:SIMPLE)]], 
outputRoot=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515624007, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515624007/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-13T16:33:44,064 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-13T16:33:44,105 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515624007/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-13T16:33:44,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741928_1104 (size=923) 2024-11-13T16:33:44,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741928_1104 (size=923) 2024-11-13T16:33:44,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741928_1104 (size=923) 2024-11-13T16:33:44,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741929_1105 (size=143) 2024-11-13T16:33:44,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741929_1105 (size=143) 2024-11-13T16:33:44,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741929_1105 (size=143) 2024-11-13T16:33:44,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741930_1106 (size=141) 2024-11-13T16:33:44,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741930_1106 (size=141) 2024-11-13T16:33:44,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741930_1106 (size=141) 2024-11-13T16:33:44,466 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:33:44,467 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:33:44,468 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:33:46,516 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop-18245654829521010342.jar 2024-11-13T16:33:46,517 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:33:46,518 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:33:46,518 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T16:33:46,778 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop-10242619354754891491.jar 2024-11-13T16:33:46,779 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:33:46,780 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:33:46,780 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:33:46,781 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:33:46,781 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:33:46,782 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:33:46,782 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-13T16:33:46,783 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-13T16:33:46,783 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-13T16:33:46,784 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-13T16:33:46,784 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-13T16:33:46,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-13T16:33:46,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-13T16:33:46,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-13T16:33:46,786 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-13T16:33:46,786 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-13T16:33:46,787 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-13T16:33:46,788 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:33:46,788 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:33:46,789 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:33:46,789 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:33:46,790 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:33:46,790 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:33:46,790 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:33:47,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741931_1107 (size=131440) 2024-11-13T16:33:47,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741931_1107 (size=131440) 2024-11-13T16:33:47,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741931_1107 (size=131440) 2024-11-13T16:33:47,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741932_1108 (size=4188619) 2024-11-13T16:33:47,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741932_1108 (size=4188619) 2024-11-13T16:33:47,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741932_1108 (size=4188619) 2024-11-13T16:33:47,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741933_1109 (size=6424544) 2024-11-13T16:33:47,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741933_1109 (size=6424544) 2024-11-13T16:33:47,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741933_1109 (size=6424544) 2024-11-13T16:33:47,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741934_1110 (size=1323991) 2024-11-13T16:33:47,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741934_1110 (size=1323991) 2024-11-13T16:33:47,468 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741934_1110 (size=1323991) 2024-11-13T16:33:47,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741935_1111 (size=903731) 2024-11-13T16:33:47,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741935_1111 (size=903731) 2024-11-13T16:33:47,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741935_1111 (size=903731) 2024-11-13T16:33:47,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-13T16:33:47,713 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-13T16:33:47,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741936_1112 (size=8360083) 2024-11-13T16:33:47,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741936_1112 (size=8360083) 2024-11-13T16:33:47,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741936_1112 (size=8360083) 2024-11-13T16:33:47,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741937_1113 (size=1877034) 2024-11-13T16:33:47,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741937_1113 (size=1877034) 2024-11-13T16:33:47,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741937_1113 (size=1877034) 2024-11-13T16:33:47,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741938_1114 (size=77835) 2024-11-13T16:33:47,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741938_1114 (size=77835) 2024-11-13T16:33:47,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741938_1114 (size=77835) 2024-11-13T16:33:47,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741939_1115 (size=30949) 2024-11-13T16:33:47,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741939_1115 (size=30949) 2024-11-13T16:33:47,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741939_1115 (size=30949) 2024-11-13T16:33:48,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741940_1116 (size=1597327) 2024-11-13T16:33:48,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is 
added to blk_1073741940_1116 (size=1597327) 2024-11-13T16:33:48,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741940_1116 (size=1597327) 2024-11-13T16:33:48,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741941_1117 (size=4695811) 2024-11-13T16:33:48,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741941_1117 (size=4695811) 2024-11-13T16:33:48,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741941_1117 (size=4695811) 2024-11-13T16:33:48,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741942_1118 (size=232957) 2024-11-13T16:33:48,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741942_1118 (size=232957) 2024-11-13T16:33:48,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741942_1118 (size=232957) 2024-11-13T16:33:48,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741943_1119 (size=127628) 2024-11-13T16:33:48,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741943_1119 (size=127628) 2024-11-13T16:33:48,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741943_1119 (size=127628) 2024-11-13T16:33:48,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741944_1120 (size=20406) 2024-11-13T16:33:48,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741944_1120 (size=20406) 2024-11-13T16:33:48,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741944_1120 (size=20406) 2024-11-13T16:33:48,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741945_1121 (size=5175431) 2024-11-13T16:33:48,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741945_1121 (size=5175431) 2024-11-13T16:33:48,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741945_1121 (size=5175431) 2024-11-13T16:33:48,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741946_1122 (size=440392) 2024-11-13T16:33:48,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741946_1122 (size=440392) 2024-11-13T16:33:48,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741946_1122 (size=440392) 2024-11-13T16:33:48,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:33001 is added to blk_1073741947_1123 (size=217634) 2024-11-13T16:33:48,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741947_1123 (size=217634) 2024-11-13T16:33:48,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741947_1123 (size=217634) 2024-11-13T16:33:48,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741948_1124 (size=1832290) 2024-11-13T16:33:48,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741948_1124 (size=1832290) 2024-11-13T16:33:48,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741948_1124 (size=1832290) 2024-11-13T16:33:48,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741949_1125 (size=322274) 2024-11-13T16:33:48,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741949_1125 (size=322274) 2024-11-13T16:33:48,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741949_1125 (size=322274) 2024-11-13T16:33:48,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741950_1126 (size=503880) 2024-11-13T16:33:48,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741950_1126 (size=503880) 2024-11-13T16:33:48,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741950_1126 (size=503880) 2024-11-13T16:33:48,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741951_1127 (size=29229) 2024-11-13T16:33:48,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741951_1127 (size=29229) 2024-11-13T16:33:48,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741951_1127 (size=29229) 2024-11-13T16:33:48,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741952_1128 (size=24096) 2024-11-13T16:33:48,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741952_1128 (size=24096) 2024-11-13T16:33:48,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741952_1128 (size=24096) 2024-11-13T16:33:49,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741953_1129 (size=111872) 2024-11-13T16:33:49,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741953_1129 (size=111872) 2024-11-13T16:33:49,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:43257 is added to blk_1073741953_1129 (size=111872) 2024-11-13T16:33:49,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741954_1130 (size=45609) 2024-11-13T16:33:49,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741954_1130 (size=45609) 2024-11-13T16:33:49,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741954_1130 (size=45609) 2024-11-13T16:33:49,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741955_1131 (size=136454) 2024-11-13T16:33:49,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741955_1131 (size=136454) 2024-11-13T16:33:49,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741955_1131 (size=136454) 2024-11-13T16:33:49,220 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-13T16:33:49,226 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-11-13T16:33:49,230 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=32.9 K 2024-11-13T16:33:49,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741956_1132 (size=686) 2024-11-13T16:33:49,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741956_1132 (size=686) 2024-11-13T16:33:49,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741956_1132 (size=686) 2024-11-13T16:33:49,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741957_1133 (size=15) 2024-11-13T16:33:49,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741957_1133 (size=15) 2024-11-13T16:33:49,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741957_1133 (size=15) 2024-11-13T16:33:49,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741958_1134 (size=303726) 2024-11-13T16:33:49,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741958_1134 (size=303726) 2024-11-13T16:33:49,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741958_1134 (size=303726) 2024-11-13T16:33:49,541 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-13T16:33:49,545 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T16:33:49,588 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0002_000001 (auth:SIMPLE) from 127.0.0.1:39872 2024-11-13T16:34:00,775 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0002_000001 (auth:SIMPLE) from 127.0.0.1:35642 2024-11-13T16:34:01,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741959_1135 (size=349376) 2024-11-13T16:34:01,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741959_1135 (size=349376) 2024-11-13T16:34:01,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741959_1135 (size=349376) 2024-11-13T16:34:03,447 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0002_000001 (auth:SIMPLE) from 127.0.0.1:33662 2024-11-13T16:34:05,906 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T16:34:13,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741960_1136 (size=14519) 2024-11-13T16:34:13,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741960_1136 (size=14519) 2024-11-13T16:34:13,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741960_1136 (size=14519) 2024-11-13T16:34:13,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741961_1137 (size=8172) 2024-11-13T16:34:13,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741961_1137 (size=8172) 2024-11-13T16:34:13,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741961_1137 (size=8172) 2024-11-13T16:34:13,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741962_1138 (size=5878) 2024-11-13T16:34:13,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741962_1138 (size=5878) 2024-11-13T16:34:13,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741962_1138 (size=5878) 2024-11-13T16:34:13,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741963_1139 (size=5102) 2024-11-13T16:34:13,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:43257 is added to blk_1073741963_1139 (size=5102) 2024-11-13T16:34:13,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741963_1139 (size=5102) 2024-11-13T16:34:13,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741964_1140 (size=17462) 2024-11-13T16:34:13,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741964_1140 (size=17462) 2024-11-13T16:34:13,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741964_1140 (size=17462) 2024-11-13T16:34:13,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741965_1141 (size=462) 2024-11-13T16:34:13,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741965_1141 (size=462) 2024-11-13T16:34:13,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741965_1141 (size=462) 2024-11-13T16:34:14,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741966_1142 (size=17462) 2024-11-13T16:34:14,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741966_1142 (size=17462) 2024-11-13T16:34:14,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741966_1142 (size=17462) 2024-11-13T16:34:14,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741967_1143 (size=349376) 2024-11-13T16:34:14,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741967_1143 (size=349376) 2024-11-13T16:34:14,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741967_1143 (size=349376) 2024-11-13T16:34:14,109 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0002_000001 (auth:SIMPLE) from 127.0.0.1:37912 2024-11-13T16:34:15,847 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 3b9925ab6291465de82c43a01aa5ea9a changed from -1.0 to 0.0, refreshing cache 2024-11-13T16:34:15,848 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 55e47a9f1f061450128f3dc9bb282520 changed from -1.0 to 0.0, refreshing cache 2024-11-13T16:34:15,848 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region e74f391d9aede80a8efc10a7994ab330 changed from -1.0 to 0.0, refreshing cache 2024-11-13T16:34:15,848 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region b604a0fb59df25b4445300e838beaaad changed from -1.0 to 0.0, refreshing cache 2024-11-13T16:34:15,983 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-13T16:34:15,984 INFO [Time-limited test {}] 
snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-13T16:34:15,993 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb-testExportWithResetTtl 2024-11-13T16:34:15,993 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-13T16:34:15,993 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-13T16:34:15,993 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-315877880_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-13T16:34:15,994 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-13T16:34:15,994 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-13T16:34:15,994 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-315877880_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515624007/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515624007/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-13T16:34:15,995 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515624007/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-13T16:34:15,995 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515624007/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-13T16:34:16,003 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testExportWithResetTtl 2024-11-13T16:34:16,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl 2024-11-13T16:34:16,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-13T16:34:16,008 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515656007"}]},"ts":"1731515656007"} 2024-11-13T16:34:16,010 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-11-13T16:34:16,010 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-11-13T16:34:16,011 INFO 
[PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=45, ppid=44, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-11-13T16:34:16,013 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=55e47a9f1f061450128f3dc9bb282520, UNASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=e74f391d9aede80a8efc10a7994ab330, UNASSIGN}] 2024-11-13T16:34:16,014 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=e74f391d9aede80a8efc10a7994ab330, UNASSIGN 2024-11-13T16:34:16,014 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=55e47a9f1f061450128f3dc9bb282520, UNASSIGN 2024-11-13T16:34:16,015 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=55e47a9f1f061450128f3dc9bb282520, regionState=CLOSING, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:34:16,015 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=e74f391d9aede80a8efc10a7994ab330, regionState=CLOSING, regionLocation=f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:34:16,017 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=e74f391d9aede80a8efc10a7994ab330, UNASSIGN because future has completed 2024-11-13T16:34:16,017 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T16:34:16,017 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE, hasLock=false; CloseRegionProcedure e74f391d9aede80a8efc10a7994ab330, server=f8f85b0bb1d8,39619,1731515529105}] 2024-11-13T16:34:16,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=55e47a9f1f061450128f3dc9bb282520, UNASSIGN because future has completed 2024-11-13T16:34:16,018 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T16:34:16,018 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=46, state=RUNNABLE, hasLock=false; CloseRegionProcedure 55e47a9f1f061450128f3dc9bb282520, server=f8f85b0bb1d8,38523,1731515529292}] 2024-11-13T16:34:16,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-13T16:34:16,170 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] 
handler.UnassignRegionHandler(122): Close e74f391d9aede80a8efc10a7994ab330 2024-11-13T16:34:16,171 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T16:34:16,171 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1722): Closing e74f391d9aede80a8efc10a7994ab330, disabling compactions & flushes 2024-11-13T16:34:16,171 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330. 2024-11-13T16:34:16,171 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(122): Close 55e47a9f1f061450128f3dc9bb282520 2024-11-13T16:34:16,171 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330. 2024-11-13T16:34:16,171 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T16:34:16,171 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330. after waiting 0 ms 2024-11-13T16:34:16,171 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330. 2024-11-13T16:34:16,171 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1722): Closing 55e47a9f1f061450128f3dc9bb282520, disabling compactions & flushes 2024-11-13T16:34:16,171 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520. 2024-11-13T16:34:16,171 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520. 2024-11-13T16:34:16,171 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520. after waiting 0 ms 2024-11-13T16:34:16,171 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520. 
2024-11-13T16:34:16,176 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/e74f391d9aede80a8efc10a7994ab330/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-13T16:34:16,176 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/55e47a9f1f061450128f3dc9bb282520/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-13T16:34:16,176 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:34:16,176 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:34:16,177 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520. 2024-11-13T16:34:16,177 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330. 2024-11-13T16:34:16,177 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1676): Region close journal for e74f391d9aede80a8efc10a7994ab330: Waiting for close lock at 1731515656171Running coprocessor pre-close hooks at 1731515656171Disabling compacts and flushes for region at 1731515656171Disabling writes for close at 1731515656171Writing region close event to WAL at 1731515656172 (+1 ms)Running coprocessor post-close hooks at 1731515656176 (+4 ms)Closed at 1731515656177 (+1 ms) 2024-11-13T16:34:16,177 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1676): Region close journal for 55e47a9f1f061450128f3dc9bb282520: Waiting for close lock at 1731515656171Running coprocessor pre-close hooks at 1731515656171Disabling compacts and flushes for region at 1731515656171Disabling writes for close at 1731515656171Writing region close event to WAL at 1731515656172 (+1 ms)Running coprocessor post-close hooks at 1731515656176 (+4 ms)Closed at 1731515656177 (+1 ms) 2024-11-13T16:34:16,179 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] handler.UnassignRegionHandler(157): Closed e74f391d9aede80a8efc10a7994ab330 2024-11-13T16:34:16,180 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=e74f391d9aede80a8efc10a7994ab330, regionState=CLOSED 2024-11-13T16:34:16,180 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(157): Closed 55e47a9f1f061450128f3dc9bb282520 2024-11-13T16:34:16,180 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=55e47a9f1f061450128f3dc9bb282520, regionState=CLOSED 2024-11-13T16:34:16,182 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure 
pid=48, ppid=47, state=RUNNABLE, hasLock=false; CloseRegionProcedure e74f391d9aede80a8efc10a7994ab330, server=f8f85b0bb1d8,39619,1731515529105 because future has completed 2024-11-13T16:34:16,183 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=46, state=RUNNABLE, hasLock=false; CloseRegionProcedure 55e47a9f1f061450128f3dc9bb282520, server=f8f85b0bb1d8,38523,1731515529292 because future has completed 2024-11-13T16:34:16,185 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=47 2024-11-13T16:34:16,185 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=47, state=SUCCESS, hasLock=false; CloseRegionProcedure e74f391d9aede80a8efc10a7994ab330, server=f8f85b0bb1d8,39619,1731515529105 in 166 msec 2024-11-13T16:34:16,186 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=46 2024-11-13T16:34:16,187 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=46, state=SUCCESS, hasLock=false; CloseRegionProcedure 55e47a9f1f061450128f3dc9bb282520, server=f8f85b0bb1d8,38523,1731515529292 in 166 msec 2024-11-13T16:34:16,187 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=e74f391d9aede80a8efc10a7994ab330, UNASSIGN in 172 msec 2024-11-13T16:34:16,188 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=46, resume processing ppid=45 2024-11-13T16:34:16,188 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=55e47a9f1f061450128f3dc9bb282520, UNASSIGN in 173 msec 2024-11-13T16:34:16,190 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=45, resume processing ppid=44 2024-11-13T16:34:16,190 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, ppid=44, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 177 msec 2024-11-13T16:34:16,191 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515656191"}]},"ts":"1731515656191"} 2024-11-13T16:34:16,193 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-13T16:34:16,193 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-11-13T16:34:16,195 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 191 msec 2024-11-13T16:34:16,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-13T16:34:16,329 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-11-13T16:34:16,329 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testExportWithResetTtl 2024-11-13T16:34:16,331 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-11-13T16:34:16,336 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=50, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-13T16:34:16,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-11-13T16:34:16,351 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=50, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-13T16:34:16,354 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38523 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-11-13T16:34:16,367 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/55e47a9f1f061450128f3dc9bb282520 2024-11-13T16:34:16,367 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/e74f391d9aede80a8efc10a7994ab330 2024-11-13T16:34:16,370 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/e74f391d9aede80a8efc10a7994ab330/cf, FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/e74f391d9aede80a8efc10a7994ab330/recovered.edits] 2024-11-13T16:34:16,395 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/e74f391d9aede80a8efc10a7994ab330/cf/8f8939b5e0c448829c2f729aae82e8b6 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testExportWithResetTtl/e74f391d9aede80a8efc10a7994ab330/cf/8f8939b5e0c448829c2f729aae82e8b6 2024-11-13T16:34:16,398 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/55e47a9f1f061450128f3dc9bb282520/cf, FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/55e47a9f1f061450128f3dc9bb282520/recovered.edits] 2024-11-13T16:34:16,407 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/e74f391d9aede80a8efc10a7994ab330/recovered.edits/8.seqid to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testExportWithResetTtl/e74f391d9aede80a8efc10a7994ab330/recovered.edits/8.seqid 2024-11-13T16:34:16,408 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/55e47a9f1f061450128f3dc9bb282520/cf/3f1d943c22244e60b17839002e876f02 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testExportWithResetTtl/55e47a9f1f061450128f3dc9bb282520/cf/3f1d943c22244e60b17839002e876f02 2024-11-13T16:34:16,410 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/e74f391d9aede80a8efc10a7994ab330 2024-11-13T16:34:16,417 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/55e47a9f1f061450128f3dc9bb282520/recovered.edits/8.seqid to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testExportWithResetTtl/55e47a9f1f061450128f3dc9bb282520/recovered.edits/8.seqid 2024-11-13T16:34:16,418 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportWithResetTtl/55e47a9f1f061450128f3dc9bb282520 2024-11-13T16:34:16,418 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-11-13T16:34:16,419 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-11-13T16:34:16,420 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf] 2024-11-13T16:34:16,429 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241113d9ebe5f6f5a7467185438af9d62000de_e74f391d9aede80a8efc10a7994ab330 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241113d9ebe5f6f5a7467185438af9d62000de_e74f391d9aede80a8efc10a7994ab330 2024-11-13T16:34:16,434 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241113c6ebba44b7b645b89af1c3673d28b6fd_55e47a9f1f061450128f3dc9bb282520 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241113c6ebba44b7b645b89af1c3673d28b6fd_55e47a9f1f061450128f3dc9bb282520 2024-11-13T16:34:16,435 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-11-13T16:34:16,441 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-13T16:34:16,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-13T16:34:16,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-13T16:34:16,444 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-13T16:34:16,447 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-13T16:34:16,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-13T16:34:16,447 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-13T16:34:16,455 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=50, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-13T16:34:16,462 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-11-13T16:34:16,471 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor. 2024-11-13T16:34:16,478 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=50, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-13T16:34:16,478 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states. 
2024-11-13T16:34:16,479 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731515656478"}]},"ts":"9223372036854775807"} 2024-11-13T16:34:16,479 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731515656478"}]},"ts":"9223372036854775807"} 2024-11-13T16:34:16,484 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-13T16:34:16,484 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 55e47a9f1f061450128f3dc9bb282520, NAME => 'testExportWithResetTtl,,1731515620266.55e47a9f1f061450128f3dc9bb282520.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => e74f391d9aede80a8efc10a7994ab330, NAME => 'testExportWithResetTtl,1,1731515620266.e74f391d9aede80a8efc10a7994ab330.', STARTKEY => '1', ENDKEY => ''}] 2024-11-13T16:34:16,484 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted. 2024-11-13T16:34:16,485 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731515656484"}]},"ts":"9223372036854775807"} 2024-11-13T16:34:16,489 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-11-13T16:34:16,493 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=50, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-13T16:34:16,498 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 164 msec 2024-11-13T16:34:16,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-13T16:34:16,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-13T16:34:16,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:34:16,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:34:16,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-13T16:34:16,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:34:16,507 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data null 2024-11-13T16:34:16,507 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-13T16:34:16,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:34:16,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-13T16:34:16,508 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-11-13T16:34:16,508 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-11-13T16:34:16,509 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportWithResetTtl 2024-11-13T16:34:16,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=51, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-11-13T16:34:16,512 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T16:34:16,512 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T16:34:16,512 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T16:34:16,513 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-13T16:34:16,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=51 2024-11-13T16:34:16,517 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515656517"}]},"ts":"1731515656517"} 2024-11-13T16:34:16,527 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-11-13T16:34:16,528 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-11-13T16:34:16,532 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=52, ppid=51, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-11-13T16:34:16,538 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b604a0fb59df25b4445300e838beaaad, UNASSIGN}, {pid=54, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3b9925ab6291465de82c43a01aa5ea9a, UNASSIGN}] 2024-11-13T16:34:16,540 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3b9925ab6291465de82c43a01aa5ea9a, UNASSIGN 2024-11-13T16:34:16,542 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b604a0fb59df25b4445300e838beaaad, UNASSIGN 2024-11-13T16:34:16,543 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=54 updating hbase:meta row=3b9925ab6291465de82c43a01aa5ea9a, regionState=CLOSING, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:34:16,545 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=53 updating hbase:meta row=b604a0fb59df25b4445300e838beaaad, regionState=CLOSING, regionLocation=f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:34:16,552 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=54, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3b9925ab6291465de82c43a01aa5ea9a, UNASSIGN because future has completed 2024-11-13T16:34:16,553 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T16:34:16,553 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45809 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=f8f85b0bb1d8,39619,1731515529105, table=testtb-testExportWithResetTtl, region=b604a0fb59df25b4445300e838beaaad. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-11-13T16:34:16,553 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=55, ppid=54, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3b9925ab6291465de82c43a01aa5ea9a, server=f8f85b0bb1d8,38437,1731515528789}] 2024-11-13T16:34:16,554 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b604a0fb59df25b4445300e838beaaad, UNASSIGN because future has completed 2024-11-13T16:34:16,556 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T16:34:16,557 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=56, ppid=53, state=RUNNABLE, hasLock=false; CloseRegionProcedure b604a0fb59df25b4445300e838beaaad, server=f8f85b0bb1d8,39619,1731515529105}] 2024-11-13T16:34:16,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=51 2024-11-13T16:34:16,721 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] handler.UnassignRegionHandler(122): Close 3b9925ab6291465de82c43a01aa5ea9a 2024-11-13T16:34:16,721 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T16:34:16,722 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1722): Closing 3b9925ab6291465de82c43a01aa5ea9a, disabling compactions & flushes 2024-11-13T16:34:16,722 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a. 2024-11-13T16:34:16,722 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a. 2024-11-13T16:34:16,722 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a. after waiting 0 ms 2024-11-13T16:34:16,722 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a. 
2024-11-13T16:34:16,727 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(122): Close b604a0fb59df25b4445300e838beaaad 2024-11-13T16:34:16,730 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T16:34:16,730 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1722): Closing b604a0fb59df25b4445300e838beaaad, disabling compactions & flushes 2024-11-13T16:34:16,730 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad. 2024-11-13T16:34:16,730 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad. 2024-11-13T16:34:16,731 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad. after waiting 0 ms 2024-11-13T16:34:16,731 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad. 2024-11-13T16:34:16,742 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/3b9925ab6291465de82c43a01aa5ea9a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T16:34:16,747 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:34:16,747 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a. 
2024-11-13T16:34:16,747 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1676): Region close journal for 3b9925ab6291465de82c43a01aa5ea9a: Waiting for close lock at 1731515656721Running coprocessor pre-close hooks at 1731515656721Disabling compacts and flushes for region at 1731515656721Disabling writes for close at 1731515656722 (+1 ms)Writing region close event to WAL at 1731515656724 (+2 ms)Running coprocessor post-close hooks at 1731515656747 (+23 ms)Closed at 1731515656747 2024-11-13T16:34:16,750 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/b604a0fb59df25b4445300e838beaaad/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T16:34:16,755 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:34:16,756 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad. 2024-11-13T16:34:16,756 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1676): Region close journal for b604a0fb59df25b4445300e838beaaad: Waiting for close lock at 1731515656730Running coprocessor pre-close hooks at 1731515656730Disabling compacts and flushes for region at 1731515656730Disabling writes for close at 1731515656731 (+1 ms)Writing region close event to WAL at 1731515656733 (+2 ms)Running coprocessor post-close hooks at 1731515656755 (+22 ms)Closed at 1731515656755 2024-11-13T16:34:16,757 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] handler.UnassignRegionHandler(157): Closed 3b9925ab6291465de82c43a01aa5ea9a 2024-11-13T16:34:16,763 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=54 updating hbase:meta row=3b9925ab6291465de82c43a01aa5ea9a, regionState=CLOSED 2024-11-13T16:34:16,766 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(157): Closed b604a0fb59df25b4445300e838beaaad 2024-11-13T16:34:16,771 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=53 updating hbase:meta row=b604a0fb59df25b4445300e838beaaad, regionState=CLOSED 2024-11-13T16:34:16,773 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=55, ppid=54, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3b9925ab6291465de82c43a01aa5ea9a, server=f8f85b0bb1d8,38437,1731515528789 because future has completed 2024-11-13T16:34:16,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=56, ppid=53, state=RUNNABLE, hasLock=false; CloseRegionProcedure b604a0fb59df25b4445300e838beaaad, server=f8f85b0bb1d8,39619,1731515529105 because future has completed 2024-11-13T16:34:16,783 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=55, resume processing ppid=54 2024-11-13T16:34:16,783 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=54, state=SUCCESS, hasLock=false; CloseRegionProcedure 
3b9925ab6291465de82c43a01aa5ea9a, server=f8f85b0bb1d8,38437,1731515528789 in 224 msec 2024-11-13T16:34:16,785 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=56, resume processing ppid=53 2024-11-13T16:34:16,785 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, ppid=53, state=SUCCESS, hasLock=false; CloseRegionProcedure b604a0fb59df25b4445300e838beaaad, server=f8f85b0bb1d8,39619,1731515529105 in 222 msec 2024-11-13T16:34:16,787 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=52, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3b9925ab6291465de82c43a01aa5ea9a, UNASSIGN in 245 msec 2024-11-13T16:34:16,789 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=53, resume processing ppid=52 2024-11-13T16:34:16,790 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, ppid=52, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b604a0fb59df25b4445300e838beaaad, UNASSIGN in 248 msec 2024-11-13T16:34:16,798 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=52, resume processing ppid=51 2024-11-13T16:34:16,799 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=51, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 258 msec 2024-11-13T16:34:16,801 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515656800"}]},"ts":"1731515656800"} 2024-11-13T16:34:16,803 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-13T16:34:16,803 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-11-13T16:34:16,805 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 295 msec 2024-11-13T16:34:16,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=51 2024-11-13T16:34:16,828 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-13T16:34:16,829 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportWithResetTtl 2024-11-13T16:34:16,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=57, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-13T16:34:16,831 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=57, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-13T16:34:16,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-11-13T16:34:16,832 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=57, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-13T16:34:16,834 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38523 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-11-13T16:34:16,837 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/b604a0fb59df25b4445300e838beaaad 2024-11-13T16:34:16,837 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/3b9925ab6291465de82c43a01aa5ea9a 2024-11-13T16:34:16,839 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/b604a0fb59df25b4445300e838beaaad/cf, FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/b604a0fb59df25b4445300e838beaaad/recovered.edits] 2024-11-13T16:34:16,839 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/3b9925ab6291465de82c43a01aa5ea9a/cf, FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/3b9925ab6291465de82c43a01aa5ea9a/recovered.edits] 2024-11-13T16:34:16,843 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/3b9925ab6291465de82c43a01aa5ea9a/cf/0ff8f75385844983a431f8d60b7c45c8 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportWithResetTtl/3b9925ab6291465de82c43a01aa5ea9a/cf/0ff8f75385844983a431f8d60b7c45c8 2024-11-13T16:34:16,844 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/b604a0fb59df25b4445300e838beaaad/cf/27945d21662447eda1891733ea50cc65 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportWithResetTtl/b604a0fb59df25b4445300e838beaaad/cf/27945d21662447eda1891733ea50cc65 2024-11-13T16:34:16,847 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/3b9925ab6291465de82c43a01aa5ea9a/recovered.edits/9.seqid to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportWithResetTtl/3b9925ab6291465de82c43a01aa5ea9a/recovered.edits/9.seqid 2024-11-13T16:34:16,847 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/b604a0fb59df25b4445300e838beaaad/recovered.edits/9.seqid to 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportWithResetTtl/b604a0fb59df25b4445300e838beaaad/recovered.edits/9.seqid 2024-11-13T16:34:16,847 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/3b9925ab6291465de82c43a01aa5ea9a 2024-11-13T16:34:16,847 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithResetTtl/b604a0fb59df25b4445300e838beaaad 2024-11-13T16:34:16,847 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-11-13T16:34:16,848 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-11-13T16:34:16,849 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf] 2024-11-13T16:34:16,854 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241113a363e43c90c4437a96a617e2a4ad17a4_3b9925ab6291465de82c43a01aa5ea9a to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241113a363e43c90c4437a96a617e2a4ad17a4_3b9925ab6291465de82c43a01aa5ea9a 2024-11-13T16:34:16,856 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241113e30e2af8af404513ae1920d3efcdc46c_b604a0fb59df25b4445300e838beaaad to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241113e30e2af8af404513ae1920d3efcdc46c_b604a0fb59df25b4445300e838beaaad 2024-11-13T16:34:16,856 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-11-13T16:34:16,859 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=57, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-13T16:34:16,863 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-11-13T16:34:16,865 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor. 
2024-11-13T16:34:16,867 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=57, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-13T16:34:16,867 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states. 2024-11-13T16:34:16,867 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731515656867"}]},"ts":"9223372036854775807"} 2024-11-13T16:34:16,867 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731515656867"}]},"ts":"9223372036854775807"} 2024-11-13T16:34:16,897 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-13T16:34:16,897 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => b604a0fb59df25b4445300e838beaaad, NAME => 'testtb-testExportWithResetTtl,,1731515614477.b604a0fb59df25b4445300e838beaaad.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 3b9925ab6291465de82c43a01aa5ea9a, NAME => 'testtb-testExportWithResetTtl,1,1731515614477.3b9925ab6291465de82c43a01aa5ea9a.', STARTKEY => '1', ENDKEY => ''}] 2024-11-13T16:34:16,897 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted. 2024-11-13T16:34:16,898 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731515656897"}]},"ts":"9223372036854775807"} 2024-11-13T16:34:16,904 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-11-13T16:34:16,905 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=57, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-13T16:34:16,907 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=57, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 76 msec 2024-11-13T16:34:16,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-13T16:34:16,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-13T16:34:16,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-13T16:34:16,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, 
state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-13T16:34:16,956 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-13T16:34:16,956 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-13T16:34:16,956 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-13T16:34:16,956 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-13T16:34:16,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-13T16:34:16,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-13T16:34:16,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:34:16,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-13T16:34:16,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-13T16:34:16,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:34:16,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:34:16,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:34:16,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=57 2024-11-13T16:34:16,996 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-11-13T16:34:16,996 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-13T16:34:17,011 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(838): 
Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-11-13T16:34:17,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-11-13T16:34:17,018 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-11-13T16:34:17,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-11-13T16:34:17,023 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportWithResetTtl" type: DISABLED 2024-11-13T16:34:17,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-11-13T16:34:17,052 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=781 (was 754) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2012677625_1 at /127.0.0.1:51518 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/f8f85b0bb1d8:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (565235355) connection to localhost/127.0.0.1:33477 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2192 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 87177) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: IPC Client (565235355) connection to localhost/127.0.0.1:39131 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33477 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/f8f85b0bb1d8:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-315877880_22 at /127.0.0.1:59010 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/f8f85b0bb1d8:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-315877880_22 at /127.0.0.1:35246 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (565235355) connection to localhost/127.0.0.1:37215 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-315877880_22 at /127.0.0.1:51526 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2012677625_1 at /127.0.0.1:35214 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=809 (was 801) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=2187 (was 1998) - SystemLoadAverage LEAK? -, ProcessCount=19 (was 16) - ProcessCount LEAK? -, AvailableMemoryMB=952 (was 1435) 2024-11-13T16:34:17,053 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=781 is superior to 500 2024-11-13T16:34:17,074 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=781, OpenFileDescriptor=809, MaxFileDescriptor=1048576, SystemLoadAverage=2187, ProcessCount=19, AvailableMemoryMB=952 2024-11-13T16:34:17,074 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=781 is superior to 500 2024-11-13T16:34:17,076 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T16:34:17,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=58, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-11-13T16:34:17,079 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T16:34:17,079 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 58 2024-11-13T16:34:17,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-13T16:34:17,081 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T16:34:17,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741968_1144 (size=443) 2024-11-13T16:34:17,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741968_1144 (size=443) 2024-11-13T16:34:17,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741968_1144 (size=443) 2024-11-13T16:34:17,111 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => bd6558aadad0ab7680d2333e3db74e60, NAME => 'testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 
'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:34:17,111 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 5a60f84059cf783589bb02682cf05ff5, NAME => 'testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:34:17,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741970_1146 (size=68) 2024-11-13T16:34:17,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741969_1145 (size=68) 2024-11-13T16:34:17,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741969_1145 (size=68) 2024-11-13T16:34:17,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741970_1146 (size=68) 2024-11-13T16:34:17,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741969_1145 (size=68) 2024-11-13T16:34:17,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741970_1146 (size=68) 2024-11-13T16:34:17,125 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:34:17,125 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:34:17,126 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 5a60f84059cf783589bb02682cf05ff5, disabling compactions & flushes 2024-11-13T16:34:17,126 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing bd6558aadad0ab7680d2333e3db74e60, disabling compactions & flushes 2024-11-13T16:34:17,126 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): 
Closing region testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5. 2024-11-13T16:34:17,126 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60. 2024-11-13T16:34:17,126 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5. 2024-11-13T16:34:17,126 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60. 2024-11-13T16:34:17,126 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5. after waiting 0 ms 2024-11-13T16:34:17,126 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60. after waiting 0 ms 2024-11-13T16:34:17,126 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5. 2024-11-13T16:34:17,126 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60. 2024-11-13T16:34:17,126 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5. 2024-11-13T16:34:17,126 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60. 
2024-11-13T16:34:17,126 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 5a60f84059cf783589bb02682cf05ff5: Waiting for close lock at 1731515657125Disabling compacts and flushes for region at 1731515657125Disabling writes for close at 1731515657126 (+1 ms)Writing region close event to WAL at 1731515657126Closed at 1731515657126 2024-11-13T16:34:17,126 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for bd6558aadad0ab7680d2333e3db74e60: Waiting for close lock at 1731515657125Disabling compacts and flushes for region at 1731515657125Disabling writes for close at 1731515657126 (+1 ms)Writing region close event to WAL at 1731515657126Closed at 1731515657126 2024-11-13T16:34:17,127 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T16:34:17,127 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1731515657127"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515657127"}]},"ts":"1731515657127"} 2024-11-13T16:34:17,127 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1731515657127"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515657127"}]},"ts":"1731515657127"} 2024-11-13T16:34:17,131 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
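The CreateTableProcedure sequence above (pid=58, from CREATE_TABLE_PRE_OPERATION through the two regions being added to hbase:meta) was kicked off by the client create request logged at 16:34:17,076 with a MOB-enabled column family. A minimal sketch of how that descriptor might be built and submitted via the HBase 2.x Admin API; the split key and attribute values are copied from the log, the surrounding setup is assumed, and this is an illustration rather than the test's actual code.

    // Sketch: build the MOB-enabled descriptor seen in the log and create the
    // table pre-split at row key "1" (the STARTKEY/ENDKEY boundary logged above).
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    final class CreateTableSketch {
      static void createTestTable(Admin admin) throws java.io.IOException {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)       // IS_MOB => 'true'
            .setMobThreshold(0L)       // MOB_THRESHOLD => '0'
            .setMaxVersions(1)         // VERSIONS => '1'
            .setBlocksize(65536)       // BLOCKSIZE => '65536'
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportFileSystemState"))
            .setColumnFamily(cf)
            .build();
        byte[][] splitKeys = { Bytes.toBytes("1") };  // yields regions ['', '1') and ['1', '')
        admin.createTable(td, splitKeys);
      }
    }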
2024-11-13T16:34:17,132 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T16:34:17,132 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515657132"}]},"ts":"1731515657132"} 2024-11-13T16:34:17,134 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-11-13T16:34:17,134 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {f8f85b0bb1d8=0} racks are {/default-rack=0} 2024-11-13T16:34:17,135 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T16:34:17,135 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T16:34:17,135 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T16:34:17,135 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T16:34:17,135 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T16:34:17,135 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T16:34:17,136 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T16:34:17,136 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T16:34:17,136 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T16:34:17,136 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T16:34:17,136 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=bd6558aadad0ab7680d2333e3db74e60, ASSIGN}, {pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5a60f84059cf783589bb02682cf05ff5, ASSIGN}] 2024-11-13T16:34:17,137 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5a60f84059cf783589bb02682cf05ff5, ASSIGN 2024-11-13T16:34:17,137 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=bd6558aadad0ab7680d2333e3db74e60, ASSIGN 2024-11-13T16:34:17,138 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=bd6558aadad0ab7680d2333e3db74e60, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,38437,1731515528789; forceNewPlan=false, retain=false 2024-11-13T16:34:17,138 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=60, ppid=58, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5a60f84059cf783589bb02682cf05ff5, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,38523,1731515529292; forceNewPlan=false, retain=false 2024-11-13T16:34:17,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-13T16:34:17,289 INFO [f8f85b0bb1d8:45809 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-13T16:34:17,289 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=bd6558aadad0ab7680d2333e3db74e60, regionState=OPENING, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:34:17,289 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=60 updating hbase:meta row=5a60f84059cf783589bb02682cf05ff5, regionState=OPENING, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:34:17,291 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=bd6558aadad0ab7680d2333e3db74e60, ASSIGN because future has completed 2024-11-13T16:34:17,292 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=59, state=RUNNABLE, hasLock=false; OpenRegionProcedure bd6558aadad0ab7680d2333e3db74e60, server=f8f85b0bb1d8,38437,1731515528789}] 2024-11-13T16:34:17,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5a60f84059cf783589bb02682cf05ff5, ASSIGN because future has completed 2024-11-13T16:34:17,293 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=62, ppid=60, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5a60f84059cf783589bb02682cf05ff5, server=f8f85b0bb1d8,38523,1731515529292}] 2024-11-13T16:34:17,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-13T16:34:17,449 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60. 2024-11-13T16:34:17,449 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5. 
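The two TransitRegionStateProcedures above (pid=59/60) pick target servers and hand off to OpenRegionProcedures, whose execution on the region servers continues in the records that follow. Once the table is online, a client can confirm where each region landed; a minimal sketch using the standard RegionLocator API (the Connection is assumed to be set up as in the earlier sketch, and the snippet is illustrative only).

    // Sketch: list the region start keys and hosting servers for the new table.
    import java.util.List;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    final class RegionLocationSketch {
      static void printLocations(Connection conn) throws java.io.IOException {
        TableName table = TableName.valueOf("testtb-testExportFileSystemState");
        try (RegionLocator locator = conn.getRegionLocator(table)) {
          List<HRegionLocation> locations = locator.getAllRegionLocations();
          for (HRegionLocation loc : locations) {
            // e.g. start keys '' and '1', each mapped to the server chosen above
            System.out.println(Bytes.toStringBinary(loc.getRegion().getStartKey())
                + " -> " + loc.getServerName());
          }
        }
      }
    }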
2024-11-13T16:34:17,450 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(7752): Opening region: {ENCODED => bd6558aadad0ab7680d2333e3db74e60, NAME => 'testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60.', STARTKEY => '', ENDKEY => '1'} 2024-11-13T16:34:17,450 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7752): Opening region: {ENCODED => 5a60f84059cf783589bb02682cf05ff5, NAME => 'testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5.', STARTKEY => '1', ENDKEY => ''} 2024-11-13T16:34:17,450 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60. service=AccessControlService 2024-11-13T16:34:17,450 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5. service=AccessControlService 2024-11-13T16:34:17,451 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T16:34:17,451 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T16:34:17,451 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 5a60f84059cf783589bb02682cf05ff5 2024-11-13T16:34:17,451 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState bd6558aadad0ab7680d2333e3db74e60 2024-11-13T16:34:17,451 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:34:17,451 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:34:17,452 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(7794): checking encryption for bd6558aadad0ab7680d2333e3db74e60 2024-11-13T16:34:17,452 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7794): checking encryption for 5a60f84059cf783589bb02682cf05ff5 2024-11-13T16:34:17,452 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] 
regionserver.HRegion(7797): checking classloading for 5a60f84059cf783589bb02682cf05ff5 2024-11-13T16:34:17,452 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(7797): checking classloading for bd6558aadad0ab7680d2333e3db74e60 2024-11-13T16:34:17,454 INFO [StoreOpener-bd6558aadad0ab7680d2333e3db74e60-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region bd6558aadad0ab7680d2333e3db74e60 2024-11-13T16:34:17,454 INFO [StoreOpener-5a60f84059cf783589bb02682cf05ff5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5a60f84059cf783589bb02682cf05ff5 2024-11-13T16:34:17,456 INFO [StoreOpener-5a60f84059cf783589bb02682cf05ff5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5a60f84059cf783589bb02682cf05ff5 columnFamilyName cf 2024-11-13T16:34:17,456 INFO [StoreOpener-bd6558aadad0ab7680d2333e3db74e60-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bd6558aadad0ab7680d2333e3db74e60 columnFamilyName cf 2024-11-13T16:34:17,458 DEBUG [StoreOpener-5a60f84059cf783589bb02682cf05ff5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:34:17,458 DEBUG [StoreOpener-bd6558aadad0ab7680d2333e3db74e60-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:34:17,459 INFO [StoreOpener-5a60f84059cf783589bb02682cf05ff5-1 {}] regionserver.HStore(327): Store=5a60f84059cf783589bb02682cf05ff5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:34:17,459 INFO [StoreOpener-bd6558aadad0ab7680d2333e3db74e60-1 {}] regionserver.HStore(327): Store=bd6558aadad0ab7680d2333e3db74e60/cf, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:34:17,460 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1038): replaying wal for 5a60f84059cf783589bb02682cf05ff5 2024-11-13T16:34:17,460 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1038): replaying wal for bd6558aadad0ab7680d2333e3db74e60 2024-11-13T16:34:17,461 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/5a60f84059cf783589bb02682cf05ff5 2024-11-13T16:34:17,461 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/bd6558aadad0ab7680d2333e3db74e60 2024-11-13T16:34:17,461 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/5a60f84059cf783589bb02682cf05ff5 2024-11-13T16:34:17,461 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/bd6558aadad0ab7680d2333e3db74e60 2024-11-13T16:34:17,462 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1048): stopping wal replay for bd6558aadad0ab7680d2333e3db74e60 2024-11-13T16:34:17,462 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1048): stopping wal replay for 5a60f84059cf783589bb02682cf05ff5 2024-11-13T16:34:17,462 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1060): Cleaning up temporary data for 5a60f84059cf783589bb02682cf05ff5 2024-11-13T16:34:17,462 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1060): Cleaning up temporary data for bd6558aadad0ab7680d2333e3db74e60 2024-11-13T16:34:17,464 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1093): writing seq id for 5a60f84059cf783589bb02682cf05ff5 2024-11-13T16:34:17,464 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1093): writing seq id for bd6558aadad0ab7680d2333e3db74e60 2024-11-13T16:34:17,466 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/5a60f84059cf783589bb02682cf05ff5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:34:17,467 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 
{event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1114): Opened 5a60f84059cf783589bb02682cf05ff5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71529613, jitterRate=0.06587429344654083}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:34:17,467 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5a60f84059cf783589bb02682cf05ff5 2024-11-13T16:34:17,468 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1006): Region open journal for 5a60f84059cf783589bb02682cf05ff5: Running coprocessor pre-open hook at 1731515657452Writing region info on filesystem at 1731515657452Initializing all the Stores at 1731515657454 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515657454Cleaning up temporary data from old regions at 1731515657462 (+8 ms)Running coprocessor post-open hooks at 1731515657467 (+5 ms)Region opened successfully at 1731515657468 (+1 ms) 2024-11-13T16:34:17,469 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/bd6558aadad0ab7680d2333e3db74e60/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:34:17,470 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1114): Opened bd6558aadad0ab7680d2333e3db74e60; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71448109, jitterRate=0.06465978920459747}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:34:17,470 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bd6558aadad0ab7680d2333e3db74e60 2024-11-13T16:34:17,470 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1006): Region open journal for bd6558aadad0ab7680d2333e3db74e60: Running coprocessor pre-open hook at 1731515657452Writing region info on filesystem at 1731515657452Initializing all the Stores at 1731515657454 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515657454Cleaning up temporary data from old regions at 1731515657462 (+8 ms)Running coprocessor post-open hooks at 1731515657470 (+8 ms)Region opened successfully at 1731515657470 2024-11-13T16:34:17,471 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, 
pid=62}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5., pid=62, masterSystemTime=1731515657444 2024-11-13T16:34:17,471 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60., pid=61, masterSystemTime=1731515657444 2024-11-13T16:34:17,473 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5. 2024-11-13T16:34:17,473 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5. 2024-11-13T16:34:17,473 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=60 updating hbase:meta row=5a60f84059cf783589bb02682cf05ff5, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:34:17,474 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60. 2024-11-13T16:34:17,474 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60. 2024-11-13T16:34:17,474 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=bd6558aadad0ab7680d2333e3db74e60, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:34:17,477 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=62, ppid=60, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5a60f84059cf783589bb02682cf05ff5, server=f8f85b0bb1d8,38523,1731515529292 because future has completed 2024-11-13T16:34:17,478 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=59, state=RUNNABLE, hasLock=false; OpenRegionProcedure bd6558aadad0ab7680d2333e3db74e60, server=f8f85b0bb1d8,38437,1731515528789 because future has completed 2024-11-13T16:34:17,483 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=62, resume processing ppid=60 2024-11-13T16:34:17,483 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, ppid=60, state=SUCCESS, hasLock=false; OpenRegionProcedure 5a60f84059cf783589bb02682cf05ff5, server=f8f85b0bb1d8,38523,1731515529292 in 185 msec 2024-11-13T16:34:17,485 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=59 2024-11-13T16:34:17,485 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=58, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5a60f84059cf783589bb02682cf05ff5, ASSIGN in 347 msec 2024-11-13T16:34:17,485 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=59, state=SUCCESS, hasLock=false; OpenRegionProcedure bd6558aadad0ab7680d2333e3db74e60, 
server=f8f85b0bb1d8,38437,1731515528789 in 189 msec 2024-11-13T16:34:17,487 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=59, resume processing ppid=58 2024-11-13T16:34:17,487 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=58, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=bd6558aadad0ab7680d2333e3db74e60, ASSIGN in 349 msec 2024-11-13T16:34:17,491 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T16:34:17,491 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515657491"}]},"ts":"1731515657491"} 2024-11-13T16:34:17,494 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-11-13T16:34:17,495 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T16:34:17,495 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-11-13T16:34:17,500 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38523 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-13T16:34:17,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:34:17,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:34:17,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:34:17,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:34:17,536 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-13T16:34:17,539 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-13T16:34:17,539 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data 
PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-13T16:34:17,539 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-13T16:34:17,543 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 460 msec 2024-11-13T16:34:17,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-13T16:34:17,709 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-13T16:34:17,709 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T16:34:17,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-13T16:34:17,713 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-13T16:34:17,714 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-11-13T16:34:17,714 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60. 
2024-11-13T16:34:17,714 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-13T16:34:17,714 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T16:34:17,715 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-13T16:34:17,723 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T16:34:17,734 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T16:34:17,753 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T16:34:17,760 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-13T16:34:17,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731515657760 (current time:1731515657760). 
2024-11-13T16:34:17,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T16:34:17,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-13T16:34:17,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T16:34:17,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16c4ebca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:34:17,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:34:17,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:34:17,766 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:34:17,769 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:34:17,770 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:34:17,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@929e1eb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:34:17,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:34:17,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:34:17,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:34:17,774 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38566, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:34:17,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33e1198b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:34:17,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:34:17,779 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:34:17,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:34:17,782 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41606, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:34:17,784 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809. 2024-11-13T16:34:17,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:34:17,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:34:17,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:34:17,785 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-13T16:34:17,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6776ee19, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:34:17,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:34:17,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:34:17,790 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:34:17,791 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:34:17,791 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:34:17,793 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3858963a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:34:17,793 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:34:17,793 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:34:17,794 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:34:17,795 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38578, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:34:17,796 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4fb0941, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:34:17,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:34:17,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:34:17,799 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:34:17,801 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41612, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-13T16:34:17,805 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2] 2024-11-13T16:34:17,806 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:34:17,809 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41206, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:34:17,811 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809. 2024-11-13T16:34:17,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:34:17,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:34:17,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:34:17,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-13T16:34:17,812 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:34:17,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-13T16:34:17,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-13T16:34:17,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 63 2024-11-13T16:34:17,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-13T16:34:17,832 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T16:34:17,840 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T16:34:17,864 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T16:34:17,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-13T16:34:17,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741971_1147 (size=170) 2024-11-13T16:34:17,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741971_1147 (size=170) 2024-11-13T16:34:17,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741971_1147 (size=170) 2024-11-13T16:34:17,983 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=63, 
state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T16:34:17,983 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bd6558aadad0ab7680d2333e3db74e60}, {pid=65, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5a60f84059cf783589bb02682cf05ff5}] 2024-11-13T16:34:17,991 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5a60f84059cf783589bb02682cf05ff5 2024-11-13T16:34:17,992 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bd6558aadad0ab7680d2333e3db74e60 2024-11-13T16:34:18,147 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38523 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=65 2024-11-13T16:34:18,148 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5. 2024-11-13T16:34:18,148 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.HRegion(2603): Flush status journal for 5a60f84059cf783589bb02682cf05ff5: 2024-11-13T16:34:18,148 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5. for emptySnaptb0-testExportFileSystemState completed. 2024-11-13T16:34:18,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-13T16:34:18,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-13T16:34:18,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:34:18,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T16:34:18,151 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38437 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=64 2024-11-13T16:34:18,152 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60. 
2024-11-13T16:34:18,152 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.HRegion(2603): Flush status journal for bd6558aadad0ab7680d2333e3db74e60: 2024-11-13T16:34:18,152 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60. for emptySnaptb0-testExportFileSystemState completed. 2024-11-13T16:34:18,154 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-13T16:34:18,154 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:34:18,154 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T16:34:18,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741972_1148 (size=71) 2024-11-13T16:34:18,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741972_1148 (size=71) 2024-11-13T16:34:18,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741972_1148 (size=71) 2024-11-13T16:34:18,207 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5. 
2024-11-13T16:34:18,207 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=65 2024-11-13T16:34:18,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=65 2024-11-13T16:34:18,209 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 5a60f84059cf783589bb02682cf05ff5 2024-11-13T16:34:18,209 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5a60f84059cf783589bb02682cf05ff5 2024-11-13T16:34:18,224 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5a60f84059cf783589bb02682cf05ff5 in 234 msec 2024-11-13T16:34:18,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741973_1149 (size=71) 2024-11-13T16:34:18,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741973_1149 (size=71) 2024-11-13T16:34:18,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741973_1149 (size=71) 2024-11-13T16:34:18,231 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60. 
2024-11-13T16:34:18,232 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-11-13T16:34:18,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=64 2024-11-13T16:34:18,232 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region bd6558aadad0ab7680d2333e3db74e60 2024-11-13T16:34:18,232 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bd6558aadad0ab7680d2333e3db74e60 2024-11-13T16:34:18,235 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=64, resume processing ppid=63 2024-11-13T16:34:18,235 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; SnapshotRegionProcedure bd6558aadad0ab7680d2333e3db74e60 in 250 msec 2024-11-13T16:34:18,235 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T16:34:18,236 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T16:34:18,237 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-13T16:34:18,238 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T16:34:18,238 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:34:18,238 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-13T16:34:18,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741974_1150 (size=63) 2024-11-13T16:34:18,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741974_1150 (size=63) 2024-11-13T16:34:18,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741974_1150 (size=63) 2024-11-13T16:34:18,252 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T16:34:18,253 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-11-13T16:34:18,254 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-11-13T16:34:18,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741975_1151 (size=653) 2024-11-13T16:34:18,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741975_1151 (size=653) 2024-11-13T16:34:18,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741975_1151 (size=653) 2024-11-13T16:34:18,291 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T16:34:18,315 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T16:34:18,320 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-11-13T16:34:18,325 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=63, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T16:34:18,325 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 63 2024-11-13T16:34:18,331 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 511 msec 2024-11-13T16:34:18,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-13T16:34:18,459 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-13T16:34:18,471 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38437 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T16:34:18,474 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38523 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T16:34:18,476 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T16:34:18,480 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-11-13T16:34:18,480 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60. 
2024-11-13T16:34:18,480 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T16:34:18,483 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T16:34:18,490 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T16:34:18,501 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T16:34:18,505 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-13T16:34:18,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731515658505 (current time:1731515658505). 2024-11-13T16:34:18,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T16:34:18,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-13T16:34:18,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T16:34:18,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fd85010, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:34:18,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:34:18,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:34:18,508 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:34:18,508 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:34:18,508 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:34:18,509 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42a873ec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-11-13T16:34:18,509 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:34:18,509 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:34:18,509 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:34:18,510 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38596, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:34:18,511 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46a5b21b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:34:18,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:34:18,512 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:34:18,513 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:34:18,514 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41614, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:34:18,515 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809. 
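[Editor's note] The master-side entries above ("Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }", followed by SnapshotDescriptionUtils filling in creation time, TTL, version and owner) are the server half of a client-side Admin.snapshot() call. A minimal sketch of that client side using only the public HBase client API; the class name and the assumption of an hbase-site.xml on the classpath are illustrative, and only the snapshot name, table name and FLUSH type come from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot of an online table, matching the request logged by
      // MasterRpcServices: ss=snaptb0-testExportFileSystemState, type=FLUSH, ttl=0 (no expiry).
      admin.snapshot(new SnapshotDescription(
          "snaptb0-testExportFileSystemState",
          TableName.valueOf("testtb-testExportFileSystemState"),
          SnapshotType.FLUSH));
    }
  }
}
```

The blocking snapshot() call returns only after the SnapshotProcedure stored below (pid=66) reaches SUCCESS.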
2024-11-13T16:34:18,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:34:18,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:34:18,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:34:18,515 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:34:18,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@430c7dae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:34:18,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:34:18,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:34:18,518 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:34:18,519 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:34:18,519 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:34:18,519 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b79530d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:34:18,519 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:34:18,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:34:18,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:34:18,521 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38616, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:34:18,522 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2cce7de6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:34:18,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:34:18,526 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:34:18,527 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:34:18,528 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41628, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:34:18,530 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2] 2024-11-13T16:34:18,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:34:18,532 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41208, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:34:18,534 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809. 
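[Editor's note] The repeated "Checking to see if procedure is done pid=66" entries that follow are the client polling the master while the blocking snapshot() call waits for SnapshotProcedure 66 to finish. The same polling can be written explicitly with the asynchronous variant; a hedged sketch, where the helper name and the 200 ms poll interval are arbitrary choices, not values from the test.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class PollSnapshot {
  // Submit the snapshot without blocking, then ask the master whether it is finished --
  // the same question MasterRpcServices answers with "Checking to see if procedure is done".
  static void snapshotAndWait(Admin admin) throws Exception {
    SnapshotDescription snap = new SnapshotDescription(
        "snaptb0-testExportFileSystemState",
        TableName.valueOf("testtb-testExportFileSystemState"),
        SnapshotType.FLUSH);
    admin.snapshotAsync(snap);                 // returns once the procedure is submitted
    while (!admin.isSnapshotFinished(snap)) {  // throws if the snapshot failed on the master
      Thread.sleep(200);                       // arbitrary poll interval for this sketch
    }
  }
}
```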
2024-11-13T16:34:18,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:34:18,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:34:18,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:34:18,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-13T16:34:18,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-13T16:34:18,537 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
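[Editor's note] "Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA]" is the master reading the table's permissions out of hbase:acl so they can be written into the snapshot description (writeAclToSnapshotDescription in the stack trace above). A hedged sketch of granting and inspecting those same entries from a client with AccessControlClient; it assumes the AccessController coprocessor is enabled, as it is in this test cluster, and the class and method names are illustrative.

```java
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ShowTableAcl {
  // Grant the full RWXCA set to a user and print what hbase:acl now holds for the table.
  static void grantAndList(Connection conn) throws Throwable {
    TableName table = TableName.valueOf("testtb-testExportFileSystemState");
    AccessControlClient.grant(conn, table, "jenkins", null, null,
        Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
        Permission.Action.CREATE, Permission.Action.ADMIN);
    List<UserPermission> perms =
        AccessControlClient.getUserPermissions(conn, table.getNameAsString());
    perms.forEach(System.out::println);  // expect an entry for jenkins with actions RWXCA
  }
}
```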
2024-11-13T16:34:18,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=66, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-13T16:34:18,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 66 2024-11-13T16:34:18,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-13T16:34:18,545 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T16:34:18,546 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T16:34:18,550 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T16:34:18,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741976_1152 (size=165) 2024-11-13T16:34:18,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741976_1152 (size=165) 2024-11-13T16:34:18,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741976_1152 (size=165) 2024-11-13T16:34:18,563 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T16:34:18,563 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bd6558aadad0ab7680d2333e3db74e60}, {pid=68, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5a60f84059cf783589bb02682cf05ff5}] 2024-11-13T16:34:18,564 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=68, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5a60f84059cf783589bb02682cf05ff5 2024-11-13T16:34:18,564 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=67, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bd6558aadad0ab7680d2333e3db74e60 2024-11-13T16:34:18,648 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-13T16:34:18,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38437 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=67 2024-11-13T16:34:18,717 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38523 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=68 2024-11-13T16:34:18,717 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60. 2024-11-13T16:34:18,717 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5. 2024-11-13T16:34:18,717 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegion(2902): Flushing 5a60f84059cf783589bb02682cf05ff5 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-13T16:34:18,719 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegion(2902): Flushing bd6558aadad0ab7680d2333e3db74e60 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-13T16:34:18,764 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241113cbcac618b008441a88205bb049684df6_bd6558aadad0ab7680d2333e3db74e60 is 71, key is 05fa82bd27a29802cc8568568222ac42/cf:q/1731515658471/Put/seqid=0 2024-11-13T16:34:18,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741977_1153 (size=5032) 2024-11-13T16:34:18,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741977_1153 (size=5032) 2024-11-13T16:34:18,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741977_1153 (size=5032) 2024-11-13T16:34:18,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:34:18,830 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241113971a96476dc3490196066e1bc489a511_5a60f84059cf783589bb02682cf05ff5 is 71, key is 1055cd326107d78b4728aa757d467cc4/cf:q/1731515658473/Put/seqid=0 2024-11-13T16:34:18,856 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HMobStore(268): FLUSH 
Renaming flushed file from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241113cbcac618b008441a88205bb049684df6_bd6558aadad0ab7680d2333e3db74e60 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241113cbcac618b008441a88205bb049684df6_bd6558aadad0ab7680d2333e3db74e60 2024-11-13T16:34:18,858 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/bd6558aadad0ab7680d2333e3db74e60/.tmp/cf/5ce32025bb794446bbe06a913f6401a9, store: [table=testtb-testExportFileSystemState family=cf region=bd6558aadad0ab7680d2333e3db74e60] 2024-11-13T16:34:18,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-13T16:34:18,860 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/bd6558aadad0ab7680d2333e3db74e60/.tmp/cf/5ce32025bb794446bbe06a913f6401a9 is 209, key is 084a4ab0222e4f7738a80e39daab5d09a/cf:q/1731515658471/Put/seqid=0 2024-11-13T16:34:18,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741978_1154 (size=8241) 2024-11-13T16:34:18,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741978_1154 (size=8241) 2024-11-13T16:34:18,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741978_1154 (size=8241) 2024-11-13T16:34:18,898 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:34:18,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741979_1155 (size=5709) 2024-11-13T16:34:18,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741979_1155 (size=5709) 2024-11-13T16:34:18,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741979_1155 (size=5709) 2024-11-13T16:34:18,902 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/bd6558aadad0ab7680d2333e3db74e60/.tmp/cf/5ce32025bb794446bbe06a913f6401a9 2024-11-13T16:34:18,909 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] 
regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241113971a96476dc3490196066e1bc489a511_5a60f84059cf783589bb02682cf05ff5 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241113971a96476dc3490196066e1bc489a511_5a60f84059cf783589bb02682cf05ff5 2024-11-13T16:34:18,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/5a60f84059cf783589bb02682cf05ff5/.tmp/cf/c3b3d3606c804b5f93a0185ec4e895f3, store: [table=testtb-testExportFileSystemState family=cf region=5a60f84059cf783589bb02682cf05ff5] 2024-11-13T16:34:18,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/5a60f84059cf783589bb02682cf05ff5/.tmp/cf/c3b3d3606c804b5f93a0185ec4e895f3 is 209, key is 169ae5acd638813ac47b20870837ea344/cf:q/1731515658473/Put/seqid=0 2024-11-13T16:34:18,920 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/bd6558aadad0ab7680d2333e3db74e60/.tmp/cf/5ce32025bb794446bbe06a913f6401a9 as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/bd6558aadad0ab7680d2333e3db74e60/cf/5ce32025bb794446bbe06a913f6401a9 2024-11-13T16:34:18,929 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/bd6558aadad0ab7680d2333e3db74e60/cf/5ce32025bb794446bbe06a913f6401a9, entries=2, sequenceid=6, filesize=5.6 K 2024-11-13T16:34:18,930 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for bd6558aadad0ab7680d2333e3db74e60 in 212ms, sequenceid=6, compaction requested=false 2024-11-13T16:34:18,930 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-11-13T16:34:18,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegion(2603): Flush status journal for bd6558aadad0ab7680d2333e3db74e60: 2024-11-13T16:34:18,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60. 
for snaptb0-testExportFileSystemState completed. 2024-11-13T16:34:18,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-13T16:34:18,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:34:18,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/bd6558aadad0ab7680d2333e3db74e60/cf/5ce32025bb794446bbe06a913f6401a9] hfiles 2024-11-13T16:34:18,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/bd6558aadad0ab7680d2333e3db74e60/cf/5ce32025bb794446bbe06a913f6401a9 for snapshot=snaptb0-testExportFileSystemState 2024-11-13T16:34:18,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741980_1156 (size=15202) 2024-11-13T16:34:18,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741980_1156 (size=15202) 2024-11-13T16:34:18,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741980_1156 (size=15202) 2024-11-13T16:34:18,945 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/5a60f84059cf783589bb02682cf05ff5/.tmp/cf/c3b3d3606c804b5f93a0185ec4e895f3 2024-11-13T16:34:18,954 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/5a60f84059cf783589bb02682cf05ff5/.tmp/cf/c3b3d3606c804b5f93a0185ec4e895f3 as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/5a60f84059cf783589bb02682cf05ff5/cf/c3b3d3606c804b5f93a0185ec4e895f3 2024-11-13T16:34:18,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741981_1157 (size=110) 2024-11-13T16:34:18,961 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/5a60f84059cf783589bb02682cf05ff5/cf/c3b3d3606c804b5f93a0185ec4e895f3, entries=48, sequenceid=6, filesize=14.8 K 2024-11-13T16:34:18,962 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 5a60f84059cf783589bb02682cf05ff5 in 245ms, sequenceid=6, compaction requested=false 2024-11-13T16:34:18,963 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegion(2603): Flush status journal for 5a60f84059cf783589bb02682cf05ff5: 2024-11-13T16:34:18,963 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5. for snaptb0-testExportFileSystemState completed. 2024-11-13T16:34:18,963 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-13T16:34:18,963 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:34:18,963 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/5a60f84059cf783589bb02682cf05ff5/cf/c3b3d3606c804b5f93a0185ec4e895f3] hfiles 2024-11-13T16:34:18,963 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/5a60f84059cf783589bb02682cf05ff5/cf/c3b3d3606c804b5f93a0185ec4e895f3 for snapshot=snaptb0-testExportFileSystemState 2024-11-13T16:34:18,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741981_1157 (size=110) 2024-11-13T16:34:18,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741981_1157 (size=110) 2024-11-13T16:34:18,965 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60. 
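[Editor's note] Because the snapshot is type=FLUSH, each SnapshotRegionCallable above first flushes the region's memstore ("Flushing ... 1/1 column families", then "Finished flush of dataSize ~132 B" and "~3.13 KB") before adding the resulting hfiles to the snapshot manifest. The procedure drives that flush inside the region server, not through Admin; the sketch below is only the public-API equivalent of the same effect, with an illustrative class name.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class FlushTable {
  // Force all regions of the table to flush their memstores to hfiles,
  // the same effect the FLUSH snapshot achieves per region before referencing files.
  static void flush(Admin admin) throws IOException {
    admin.flush(TableName.valueOf("testtb-testExportFileSystemState"));
  }
}
```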
2024-11-13T16:34:18,965 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=67 2024-11-13T16:34:18,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=67 2024-11-13T16:34:18,967 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region bd6558aadad0ab7680d2333e3db74e60 2024-11-13T16:34:18,968 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=67, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bd6558aadad0ab7680d2333e3db74e60 2024-11-13T16:34:18,971 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=66, state=SUCCESS, hasLock=false; SnapshotRegionProcedure bd6558aadad0ab7680d2333e3db74e60 in 406 msec 2024-11-13T16:34:18,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741982_1158 (size=110) 2024-11-13T16:34:18,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741982_1158 (size=110) 2024-11-13T16:34:18,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741982_1158 (size=110) 2024-11-13T16:34:18,981 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5. 
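[Editor's note] The mobdir file names flushed and renamed above follow HBase's MOB naming scheme: a 32-character MD5 of the region start key, then the date (20241113), then what looks like a dashless UUID, plus an underscore and the encoded region name. That is why the first region (empty start key) produces the prefix d41d8cd98f00b204e9800998ecf8427e and the region starting at row "1" produces c4ca4238a0b923820dcc509a6f75849b. A small plain-JDK check of those two digests; the class name is illustrative and nothing HBase-specific is used.

```java
import java.math.BigInteger;
import java.security.MessageDigest;

public class MobPrefixCheck {
  static String md5Hex(byte[] key) throws Exception {
    byte[] d = MessageDigest.getInstance("MD5").digest(key);
    return String.format("%032x", new BigInteger(1, d));
  }

  public static void main(String[] args) throws Exception {
    // Empty start key of the first region -> d41d8cd98f00b204e9800998ecf8427e
    System.out.println(md5Hex(new byte[0]));
    // Start key "1" of the second region -> c4ca4238a0b923820dcc509a6f75849b
    System.out.println(md5Hex("1".getBytes("UTF-8")));
  }
}
```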
2024-11-13T16:34:18,981 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-11-13T16:34:18,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=68 2024-11-13T16:34:18,982 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 5a60f84059cf783589bb02682cf05ff5 2024-11-13T16:34:18,982 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=68, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5a60f84059cf783589bb02682cf05ff5 2024-11-13T16:34:18,985 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=68, resume processing ppid=66 2024-11-13T16:34:18,985 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, ppid=66, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5a60f84059cf783589bb02682cf05ff5 in 420 msec 2024-11-13T16:34:18,985 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T16:34:18,986 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T16:34:18,987 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
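[Editor's note] The SNAPSHOT_SNAPSHOT_MOB_REGION step above exists because the table's column family "cf" is MOB-enabled: large cells are stored as hfiles under mobdir/ in the MOB pseudo-region directory (8c2558c0b59bb3a4e2e578c2c8e8915d here), and those files have to be referenced by the snapshot separately from the ordinary region files. A hedged sketch of how such a family can be declared at table creation; the 3-byte threshold is only an illustration of "almost every value goes to MOB" and is not claimed to be the test's setting, though the single split at "1" does match the two regions seen in this log.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  // Create a table whose 'cf' family stores cells above the threshold as MOB hfiles under mobdir/.
  static void create(Admin admin) throws java.io.IOException {
    admin.createTable(
        TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportFileSystemState"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("cf"))
                .setMobEnabled(true)   // values over the threshold are written to MOB hfiles
                .setMobThreshold(3L)   // illustrative threshold in bytes, not the test's value
                .build())
            .build(),
        new byte[][] { Bytes.toBytes("1") });  // one split at "1", giving the two regions above
  }
}
```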
2024-11-13T16:34:18,988 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T16:34:18,988 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:34:18,989 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241113971a96476dc3490196066e1bc489a511_5a60f84059cf783589bb02682cf05ff5, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241113cbcac618b008441a88205bb049684df6_bd6558aadad0ab7680d2333e3db74e60] hfiles 2024-11-13T16:34:18,989 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241113971a96476dc3490196066e1bc489a511_5a60f84059cf783589bb02682cf05ff5 2024-11-13T16:34:18,989 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241113cbcac618b008441a88205bb049684df6_bd6558aadad0ab7680d2333e3db74e60 2024-11-13T16:34:19,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741983_1159 (size=294) 2024-11-13T16:34:19,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741983_1159 (size=294) 2024-11-13T16:34:19,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741983_1159 (size=294) 2024-11-13T16:34:19,006 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T16:34:19,006 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-11-13T16:34:19,009 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-13T16:34:19,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741984_1160 (size=963) 2024-11-13T16:34:19,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741984_1160 (size=963) 2024-11-13T16:34:19,071 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741984_1160 (size=963) 2024-11-13T16:34:19,102 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T16:34:19,142 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T16:34:19,145 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-13T16:34:19,151 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T16:34:19,151 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 66 2024-11-13T16:34:19,166 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 615 msec 2024-11-13T16:34:19,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-13T16:34:19,169 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-13T16:34:19,169 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515659169 2024-11-13T16:34:19,169 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:37215, tgtDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515659169, rawTgtDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515659169, srcFsUri=hdfs://localhost:37215, srcDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:34:19,210 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_3/usercache/jenkins/appcache/application_1731515545832_0002/container_1731515545832_0002_01_000002/launch_container.sh] 2024-11-13T16:34:19,210 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_3/usercache/jenkins/appcache/application_1731515545832_0002/container_1731515545832_0002_01_000002/container_tokens] 2024-11-13T16:34:19,211 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_3/usercache/jenkins/appcache/application_1731515545832_0002/container_1731515545832_0002_01_000002/sysfs] 2024-11-13T16:34:19,220 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:37215, inputRoot=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:34:19,220 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-315877880_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515659169, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515659169/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-13T16:34:19,225 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 
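[Editor's note] From "HDFS export destination path" onwards the test drives org.apache.hadoop.hbase.snapshot.ExportSnapshot: it verifies the source snapshot, copies the snapshot manifest into .hbase-snapshot/.tmp on the target, and then submits a MapReduce job to copy the referenced hfiles; the long run of "For class ..., using jar ..." entries below is TableMapReduceUtil resolving the dependency jars shipped with that job. A hedged sketch of invoking the same tool programmatically: the destination URI and mapper count are placeholders, and the long-option spellings follow the ExportSnapshot usage text but can differ between versions, so check --help on your build.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotToHdfs {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Roughly equivalent to:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     --snapshot snaptb0-testExportFileSystemState --copy-to hdfs://nn:8020/backup --mappers 2
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb0-testExportFileSystemState",
        "--copy-to", "hdfs://nn:8020/backup",   // assumed destination, not the test's export path
        "--mappers", "2"
    });
    System.exit(rc);
  }
}
```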
2024-11-13T16:34:19,241 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515659169/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-13T16:34:19,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741985_1161 (size=165) 2024-11-13T16:34:19,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741985_1161 (size=165) 2024-11-13T16:34:19,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741985_1161 (size=165) 2024-11-13T16:34:19,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741986_1162 (size=963) 2024-11-13T16:34:19,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741986_1162 (size=963) 2024-11-13T16:34:19,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741986_1162 (size=963) 2024-11-13T16:34:19,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:34:19,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:34:19,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:34:20,289 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0002_000001 (auth:SIMPLE) from 127.0.0.1:38090 2024-11-13T16:34:20,312 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-1_0/usercache/jenkins/appcache/application_1731515545832_0002/container_1731515545832_0002_01_000001/launch_container.sh] 2024-11-13T16:34:20,313 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-1_0/usercache/jenkins/appcache/application_1731515545832_0002/container_1731515545832_0002_01_000001/container_tokens] 2024-11-13T16:34:20,313 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-1_0/usercache/jenkins/appcache/application_1731515545832_0002/container_1731515545832_0002_01_000001/sysfs] 2024-11-13T16:34:21,002 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop-5460098021404083685.jar 2024-11-13T16:34:21,002 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:34:21,003 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:34:21,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop-6799341564566407311.jar 2024-11-13T16:34:21,111 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:34:21,112 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:34:21,112 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:34:21,113 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:34:21,113 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:34:21,114 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:34:21,114 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-13T16:34:21,114 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-13T16:34:21,115 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-13T16:34:21,115 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-13T16:34:21,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-13T16:34:21,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-13T16:34:21,117 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-13T16:34:21,117 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-13T16:34:21,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-13T16:34:21,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-13T16:34:21,119 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-13T16:34:21,119 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:34:21,120 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:34:21,120 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:34:21,120 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:34:21,121 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:34:21,121 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:34:21,121 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:34:21,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741987_1163 (size=131440) 2024-11-13T16:34:21,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741987_1163 (size=131440) 2024-11-13T16:34:21,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741987_1163 (size=131440) 2024-11-13T16:34:21,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741988_1164 (size=4188619) 2024-11-13T16:34:21,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741988_1164 (size=4188619) 2024-11-13T16:34:21,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741988_1164 (size=4188619) 2024-11-13T16:34:21,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741989_1165 (size=1323991) 2024-11-13T16:34:21,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741989_1165 (size=1323991) 2024-11-13T16:34:21,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741989_1165 (size=1323991) 2024-11-13T16:34:21,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741990_1166 (size=903731) 2024-11-13T16:34:21,261 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741990_1166 (size=903731) 2024-11-13T16:34:21,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741990_1166 (size=903731) 2024-11-13T16:34:21,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741991_1167 (size=8360083) 2024-11-13T16:34:21,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741991_1167 (size=8360083) 2024-11-13T16:34:21,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741991_1167 (size=8360083) 2024-11-13T16:34:21,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741992_1168 (size=440392) 2024-11-13T16:34:21,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741992_1168 (size=440392) 2024-11-13T16:34:21,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741992_1168 (size=440392) 2024-11-13T16:34:21,719 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T16:34:21,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741993_1169 (size=1877034) 2024-11-13T16:34:21,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741993_1169 (size=1877034) 2024-11-13T16:34:21,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741993_1169 (size=1877034) 2024-11-13T16:34:21,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741994_1170 (size=77835) 2024-11-13T16:34:21,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741994_1170 (size=77835) 2024-11-13T16:34:21,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741994_1170 (size=77835) 2024-11-13T16:34:21,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741995_1171 (size=30949) 2024-11-13T16:34:21,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741995_1171 (size=30949) 2024-11-13T16:34:21,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741995_1171 (size=30949) 2024-11-13T16:34:22,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741996_1172 (size=1597327) 2024-11-13T16:34:22,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741996_1172 (size=1597327) 2024-11-13T16:34:22,217 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741996_1172 (size=1597327) 2024-11-13T16:34:22,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741997_1173 (size=4695811) 2024-11-13T16:34:22,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741997_1173 (size=4695811) 2024-11-13T16:34:22,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741997_1173 (size=4695811) 2024-11-13T16:34:22,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741998_1174 (size=232957) 2024-11-13T16:34:22,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741998_1174 (size=232957) 2024-11-13T16:34:22,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741998_1174 (size=232957) 2024-11-13T16:34:22,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741999_1175 (size=127628) 2024-11-13T16:34:22,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741999_1175 (size=127628) 2024-11-13T16:34:22,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741999_1175 (size=127628) 2024-11-13T16:34:22,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742000_1176 (size=20406) 2024-11-13T16:34:22,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742000_1176 (size=20406) 2024-11-13T16:34:22,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742000_1176 (size=20406) 2024-11-13T16:34:22,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742001_1177 (size=6424544) 2024-11-13T16:34:22,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742001_1177 (size=6424544) 2024-11-13T16:34:22,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742001_1177 (size=6424544) 2024-11-13T16:34:22,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742002_1178 (size=5175431) 2024-11-13T16:34:22,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742002_1178 (size=5175431) 2024-11-13T16:34:22,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742002_1178 (size=5175431) 2024-11-13T16:34:22,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742003_1179 (size=217634) 2024-11-13T16:34:22,424 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742003_1179 (size=217634) 2024-11-13T16:34:22,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742003_1179 (size=217634) 2024-11-13T16:34:22,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742004_1180 (size=1832290) 2024-11-13T16:34:22,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742004_1180 (size=1832290) 2024-11-13T16:34:22,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742004_1180 (size=1832290) 2024-11-13T16:34:22,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742005_1181 (size=322274) 2024-11-13T16:34:22,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742005_1181 (size=322274) 2024-11-13T16:34:22,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742005_1181 (size=322274) 2024-11-13T16:34:22,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742006_1182 (size=503880) 2024-11-13T16:34:22,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742006_1182 (size=503880) 2024-11-13T16:34:22,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742006_1182 (size=503880) 2024-11-13T16:34:22,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742007_1183 (size=29229) 2024-11-13T16:34:22,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742007_1183 (size=29229) 2024-11-13T16:34:22,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742007_1183 (size=29229) 2024-11-13T16:34:22,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742008_1184 (size=24096) 2024-11-13T16:34:22,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742008_1184 (size=24096) 2024-11-13T16:34:22,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742008_1184 (size=24096) 2024-11-13T16:34:22,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742009_1185 (size=111872) 2024-11-13T16:34:22,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742009_1185 (size=111872) 2024-11-13T16:34:22,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742009_1185 (size=111872) 2024-11-13T16:34:22,987 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742010_1186 (size=45609) 2024-11-13T16:34:22,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742010_1186 (size=45609) 2024-11-13T16:34:22,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742010_1186 (size=45609) 2024-11-13T16:34:23,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742011_1187 (size=136454) 2024-11-13T16:34:23,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742011_1187 (size=136454) 2024-11-13T16:34:23,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742011_1187 (size=136454) 2024-11-13T16:34:23,004 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-13T16:34:23,022 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-11-13T16:34:23,032 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=33.4 K 2024-11-13T16:34:23,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742012_1188 (size=726) 2024-11-13T16:34:23,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742012_1188 (size=726) 2024-11-13T16:34:23,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742012_1188 (size=726) 2024-11-13T16:34:23,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742013_1189 (size=15) 2024-11-13T16:34:23,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742013_1189 (size=15) 2024-11-13T16:34:23,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742013_1189 (size=15) 2024-11-13T16:34:23,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742014_1190 (size=303734) 2024-11-13T16:34:23,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742014_1190 (size=303734) 2024-11-13T16:34:23,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742014_1190 (size=303734) 2024-11-13T16:34:23,536 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-13T16:34:23,536 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T16:34:24,094 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0003_000001 (auth:SIMPLE) from 127.0.0.1:41758 2024-11-13T16:34:34,324 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0003_000001 (auth:SIMPLE) from 127.0.0.1:60662 2024-11-13T16:34:34,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742015_1191 (size=349384) 2024-11-13T16:34:34,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742015_1191 (size=349384) 2024-11-13T16:34:34,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742015_1191 (size=349384) 2024-11-13T16:34:35,906 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T16:34:36,774 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0003_000001 (auth:SIMPLE) from 127.0.0.1:40004 2024-11-13T16:34:42,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742016_1192 (size=15202) 2024-11-13T16:34:42,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742016_1192 (size=15202) 2024-11-13T16:34:42,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742016_1192 (size=15202) 2024-11-13T16:34:42,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742017_1193 (size=8241) 2024-11-13T16:34:42,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742017_1193 (size=8241) 2024-11-13T16:34:42,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742017_1193 (size=8241) 2024-11-13T16:34:42,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742018_1194 (size=5709) 2024-11-13T16:34:42,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742018_1194 (size=5709) 2024-11-13T16:34:42,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742018_1194 (size=5709) 2024-11-13T16:34:42,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742019_1195 (size=5032) 2024-11-13T16:34:42,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33001 is added to blk_1073742019_1195 (size=5032) 2024-11-13T16:34:42,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742019_1195 (size=5032) 2024-11-13T16:34:42,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742020_1196 (size=17462) 2024-11-13T16:34:42,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742020_1196 (size=17462) 2024-11-13T16:34:42,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742020_1196 (size=17462) 2024-11-13T16:34:42,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742021_1197 (size=465) 2024-11-13T16:34:42,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742021_1197 (size=465) 2024-11-13T16:34:42,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742021_1197 (size=465) 2024-11-13T16:34:42,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742022_1198 (size=17462) 2024-11-13T16:34:42,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742022_1198 (size=17462) 2024-11-13T16:34:42,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742022_1198 (size=17462) 2024-11-13T16:34:42,580 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_1/usercache/jenkins/appcache/application_1731515545832_0003/container_1731515545832_0003_01_000002/launch_container.sh] 2024-11-13T16:34:42,580 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_1/usercache/jenkins/appcache/application_1731515545832_0003/container_1731515545832_0003_01_000002/container_tokens] 2024-11-13T16:34:42,580 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_1/usercache/jenkins/appcache/application_1731515545832_0003/container_1731515545832_0003_01_000002/sysfs] 2024-11-13T16:34:42,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742023_1199 (size=349384) 2024-11-13T16:34:42,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742023_1199 (size=349384) 2024-11-13T16:34:42,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:46053 is added to blk_1073742023_1199 (size=349384) 2024-11-13T16:34:42,648 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0003_000001 (auth:SIMPLE) from 127.0.0.1:40016 2024-11-13T16:34:43,883 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-13T16:34:43,886 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-13T16:34:43,914 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportFileSystemState 2024-11-13T16:34:43,914 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-13T16:34:43,915 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-13T16:34:43,915 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-315877880_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-13T16:34:43,916 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-13T16:34:43,916 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-13T16:34:43,916 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-315877880_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515659169/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515659169/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-13T16:34:43,917 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515659169/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-13T16:34:43,917 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515659169/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-13T16:34:43,963 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportFileSystemState 2024-11-13T16:34:43,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=69, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState 2024-11-13T16:34:43,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=69 2024-11-13T16:34:43,975 DEBUG [PEWorker-2 {}] 
hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515683975"}]},"ts":"1731515683975"} 2024-11-13T16:34:43,984 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-11-13T16:34:43,984 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-11-13T16:34:43,986 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-11-13T16:34:43,990 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=bd6558aadad0ab7680d2333e3db74e60, UNASSIGN}, {pid=72, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5a60f84059cf783589bb02682cf05ff5, UNASSIGN}] 2024-11-13T16:34:43,995 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=bd6558aadad0ab7680d2333e3db74e60, UNASSIGN 2024-11-13T16:34:43,995 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5a60f84059cf783589bb02682cf05ff5, UNASSIGN 2024-11-13T16:34:44,000 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=71 updating hbase:meta row=bd6558aadad0ab7680d2333e3db74e60, regionState=CLOSING, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:34:44,001 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=72 updating hbase:meta row=5a60f84059cf783589bb02682cf05ff5, regionState=CLOSING, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:34:44,002 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=bd6558aadad0ab7680d2333e3db74e60, UNASSIGN because future has completed 2024-11-13T16:34:44,002 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T16:34:44,003 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=73, ppid=71, state=RUNNABLE, hasLock=false; CloseRegionProcedure bd6558aadad0ab7680d2333e3db74e60, server=f8f85b0bb1d8,38437,1731515528789}] 2024-11-13T16:34:44,003 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=72, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5a60f84059cf783589bb02682cf05ff5, UNASSIGN because future has completed 2024-11-13T16:34:44,004 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: 
false: evictOnSplit: true: evictOnClose: false 2024-11-13T16:34:44,004 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=74, ppid=72, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5a60f84059cf783589bb02682cf05ff5, server=f8f85b0bb1d8,38523,1731515529292}] 2024-11-13T16:34:44,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=69 2024-11-13T16:34:44,162 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] handler.UnassignRegionHandler(122): Close bd6558aadad0ab7680d2333e3db74e60 2024-11-13T16:34:44,162 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] handler.UnassignRegionHandler(122): Close 5a60f84059cf783589bb02682cf05ff5 2024-11-13T16:34:44,163 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T16:34:44,163 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T16:34:44,163 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1722): Closing bd6558aadad0ab7680d2333e3db74e60, disabling compactions & flushes 2024-11-13T16:34:44,163 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1722): Closing 5a60f84059cf783589bb02682cf05ff5, disabling compactions & flushes 2024-11-13T16:34:44,163 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60. 2024-11-13T16:34:44,163 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5. 2024-11-13T16:34:44,163 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60. 2024-11-13T16:34:44,163 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5. 2024-11-13T16:34:44,163 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5. after waiting 0 ms 2024-11-13T16:34:44,163 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60. after waiting 0 ms 2024-11-13T16:34:44,163 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5. 
2024-11-13T16:34:44,163 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60. 2024-11-13T16:34:44,208 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/bd6558aadad0ab7680d2333e3db74e60/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T16:34:44,213 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:34:44,213 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60. 2024-11-13T16:34:44,213 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1676): Region close journal for bd6558aadad0ab7680d2333e3db74e60: Waiting for close lock at 1731515684163Running coprocessor pre-close hooks at 1731515684163Disabling compacts and flushes for region at 1731515684163Disabling writes for close at 1731515684163Writing region close event to WAL at 1731515684182 (+19 ms)Running coprocessor post-close hooks at 1731515684213 (+31 ms)Closed at 1731515684213 2024-11-13T16:34:44,222 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] handler.UnassignRegionHandler(157): Closed bd6558aadad0ab7680d2333e3db74e60 2024-11-13T16:34:44,222 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/5a60f84059cf783589bb02682cf05ff5/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T16:34:44,222 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=71 updating hbase:meta row=bd6558aadad0ab7680d2333e3db74e60, regionState=CLOSED 2024-11-13T16:34:44,226 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:34:44,227 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5. 
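The TableMapReduceUtil records above ("For class X, using jar Y") show the export job resolving, for each dependency class, the jar to ship with the MapReduce job, and the ExportSnapshot records at 16:34:43 show the export finishing and being verified. For reference only, a minimal sketch of how such an export is typically driven outside the test harness, assuming a programmatic ToolRunner invocation of the ExportSnapshot tool is available in this HBase version (the snapshot name and destination URI are the ones in this log; exact options and defaults can differ between releases):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Cluster settings are picked up from hbase-site.xml on the classpath.
        Configuration conf = HBaseConfiguration.create();
        // Snapshot name and destination come from the log above; substitute
        // real values on an actual cluster.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemState",
            "-copy-to", "hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515659169",
            "-overwrite"
        });
        System.exit(rc);
      }
    }

The equivalent command-line form is "hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <uri>"; the jar-resolution lines above come from TableMapReduceUtil.addDependencyJars locating the jar that provides each class the job needs.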
2024-11-13T16:34:44,227 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1676): Region close journal for 5a60f84059cf783589bb02682cf05ff5: Waiting for close lock at 1731515684163Running coprocessor pre-close hooks at 1731515684163Disabling compacts and flushes for region at 1731515684163Disabling writes for close at 1731515684163Writing region close event to WAL at 1731515684181 (+18 ms)Running coprocessor post-close hooks at 1731515684226 (+45 ms)Closed at 1731515684227 (+1 ms) 2024-11-13T16:34:44,228 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=73, ppid=71, state=RUNNABLE, hasLock=false; CloseRegionProcedure bd6558aadad0ab7680d2333e3db74e60, server=f8f85b0bb1d8,38437,1731515528789 because future has completed 2024-11-13T16:34:44,234 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] handler.UnassignRegionHandler(157): Closed 5a60f84059cf783589bb02682cf05ff5 2024-11-13T16:34:44,235 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=72 updating hbase:meta row=5a60f84059cf783589bb02682cf05ff5, regionState=CLOSED 2024-11-13T16:34:44,238 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=73, resume processing ppid=71 2024-11-13T16:34:44,238 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=71, state=SUCCESS, hasLock=false; CloseRegionProcedure bd6558aadad0ab7680d2333e3db74e60, server=f8f85b0bb1d8,38437,1731515528789 in 229 msec 2024-11-13T16:34:44,239 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=74, ppid=72, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5a60f84059cf783589bb02682cf05ff5, server=f8f85b0bb1d8,38523,1731515529292 because future has completed 2024-11-13T16:34:44,245 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, ppid=70, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=bd6558aadad0ab7680d2333e3db74e60, UNASSIGN in 249 msec 2024-11-13T16:34:44,250 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=74, resume processing ppid=72 2024-11-13T16:34:44,250 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, ppid=72, state=SUCCESS, hasLock=false; CloseRegionProcedure 5a60f84059cf783589bb02682cf05ff5, server=f8f85b0bb1d8,38523,1731515529292 in 241 msec 2024-11-13T16:34:44,260 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=72, resume processing ppid=70 2024-11-13T16:34:44,261 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=70, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5a60f84059cf783589bb02682cf05ff5, UNASSIGN in 261 msec 2024-11-13T16:34:44,263 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=70, resume processing ppid=69 2024-11-13T16:34:44,264 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, ppid=69, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 275 msec 2024-11-13T16:34:44,265 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515684265"}]},"ts":"1731515684265"} 2024-11-13T16:34:44,269 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-11-13T16:34:44,270 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-11-13T16:34:44,272 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 307 msec 2024-11-13T16:34:44,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=69 2024-11-13T16:34:44,288 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-13T16:34:44,289 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportFileSystemState 2024-11-13T16:34:44,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=75, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-13T16:34:44,293 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=75, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-13T16:34:44,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-11-13T16:34:44,295 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=75, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-13T16:34:44,304 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38523 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-11-13T16:34:44,304 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/5a60f84059cf783589bb02682cf05ff5 2024-11-13T16:34:44,309 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/5a60f84059cf783589bb02682cf05ff5/cf, FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/5a60f84059cf783589bb02682cf05ff5/recovered.edits] 2024-11-13T16:34:44,310 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/bd6558aadad0ab7680d2333e3db74e60 2024-11-13T16:34:44,318 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/bd6558aadad0ab7680d2333e3db74e60/cf, FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/bd6558aadad0ab7680d2333e3db74e60/recovered.edits] 2024-11-13T16:34:44,322 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/5a60f84059cf783589bb02682cf05ff5/cf/c3b3d3606c804b5f93a0185ec4e895f3 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemState/5a60f84059cf783589bb02682cf05ff5/cf/c3b3d3606c804b5f93a0185ec4e895f3 2024-11-13T16:34:44,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-13T16:34:44,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-13T16:34:44,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-13T16:34:44,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-13T16:34:44,327 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-13T16:34:44,327 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-13T16:34:44,327 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-13T16:34:44,327 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-13T16:34:44,328 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/bd6558aadad0ab7680d2333e3db74e60/cf/5ce32025bb794446bbe06a913f6401a9 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemState/bd6558aadad0ab7680d2333e3db74e60/cf/5ce32025bb794446bbe06a913f6401a9 2024-11-13T16:34:44,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-13T16:34:44,334 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-13T16:34:44,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:34:44,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:34:44,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-13T16:34:44,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:34:44,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-13T16:34:44,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:34:44,337 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/5a60f84059cf783589bb02682cf05ff5/recovered.edits/9.seqid to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemState/5a60f84059cf783589bb02682cf05ff5/recovered.edits/9.seqid 2024-11-13T16:34:44,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=75 2024-11-13T16:34:44,339 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/5a60f84059cf783589bb02682cf05ff5 2024-11-13T16:34:44,340 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/bd6558aadad0ab7680d2333e3db74e60/recovered.edits/9.seqid to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemState/bd6558aadad0ab7680d2333e3db74e60/recovered.edits/9.seqid 2024-11-13T16:34:44,340 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemState/bd6558aadad0ab7680d2333e3db74e60 2024-11-13T16:34:44,340 DEBUG [PEWorker-2 {}] 
procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-11-13T16:34:44,341 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-11-13T16:34:44,342 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf] 2024-11-13T16:34:44,349 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241113971a96476dc3490196066e1bc489a511_5a60f84059cf783589bb02682cf05ff5 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241113971a96476dc3490196066e1bc489a511_5a60f84059cf783589bb02682cf05ff5 2024-11-13T16:34:44,353 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241113cbcac618b008441a88205bb049684df6_bd6558aadad0ab7680d2333e3db74e60 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241113cbcac618b008441a88205bb049684df6_bd6558aadad0ab7680d2333e3db74e60 2024-11-13T16:34:44,354 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-11-13T16:34:44,360 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=75, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-13T16:34:44,365 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-11-13T16:34:44,371 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemState' descriptor. 2024-11-13T16:34:44,380 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=75, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-13T16:34:44,380 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states. 
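The DisableTableProcedure and DeleteTableProcedure records above, together with the "delete name: ..." snapshot deletions that follow, are the master-side trace of the test tearing down testtb-testExportFileSystemState. A minimal client-side sketch of the calls that produce this sequence, assuming the standard HBase Admin API (table and snapshot names are the ones in this log):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotTestCleanupSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportFileSystemState");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // disableTable drives the DisableTableProcedure -> CloseTableRegionsProcedure
          // -> TransitRegionStateProcedure / CloseRegionProcedure chain traced above.
          if (admin.isTableEnabled(table)) {
            admin.disableTable(table);
          }
          // deleteTable archives the region and MOB files (the HFileArchiver records),
          // removes the table state from hbase:meta, and, with the AccessController
          // enabled, clears the table's ACL entry (the ZKPermissionWatcher events).
          admin.deleteTable(table);
          // Dropping both snapshots matches the two "delete name" requests in the log.
          admin.deleteSnapshot("emptySnaptb0-testExportFileSystemState");
          admin.deleteSnapshot("snaptb0-testExportFileSystemState");
        }
      }
    }

In the test itself these calls are issued by the test harness against the mini cluster, so the hosts and ports recorded here differ from a real deployment.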
2024-11-13T16:34:44,380 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731515684380"}]},"ts":"9223372036854775807"} 2024-11-13T16:34:44,381 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731515684380"}]},"ts":"9223372036854775807"} 2024-11-13T16:34:44,394 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-13T16:34:44,394 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => bd6558aadad0ab7680d2333e3db74e60, NAME => 'testtb-testExportFileSystemState,,1731515657075.bd6558aadad0ab7680d2333e3db74e60.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 5a60f84059cf783589bb02682cf05ff5, NAME => 'testtb-testExportFileSystemState,1,1731515657075.5a60f84059cf783589bb02682cf05ff5.', STARTKEY => '1', ENDKEY => ''}] 2024-11-13T16:34:44,394 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted. 2024-11-13T16:34:44,395 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731515684394"}]},"ts":"9223372036854775807"} 2024-11-13T16:34:44,399 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-11-13T16:34:44,400 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=75, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-13T16:34:44,404 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 111 msec 2024-11-13T16:34:44,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=75 2024-11-13T16:34:44,448 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-11-13T16:34:44,449 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-13T16:34:44,456 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-11-13T16:34:44,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-11-13T16:34:44,461 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-11-13T16:34:44,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-11-13T16:34:44,490 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: 
snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=791 (was 781) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39789 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (565235355) connection to localhost/127.0.0.1:44069 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-315877880_22 at /127.0.0.1:56818 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-315877880_22 at /127.0.0.1:51878 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 90587) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44069 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1187570609_1 at /127.0.0.1:48070 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-315877880_22 at /127.0.0.1:57098 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (565235355) connection to localhost/127.0.0.1:39789 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Thread-2947 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1187570609_1 at /127.0.0.1:39520 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=803 (was 809), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1826 (was 2187), ProcessCount=19 (was 19), AvailableMemoryMB=1786 (was 952) - AvailableMemoryMB LEAK? 
- 2024-11-13T16:34:44,490 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=791 is superior to 500 2024-11-13T16:34:44,512 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=791, OpenFileDescriptor=803, MaxFileDescriptor=1048576, SystemLoadAverage=1826, ProcessCount=19, AvailableMemoryMB=1785 2024-11-13T16:34:44,513 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=791 is superior to 500 2024-11-13T16:34:44,515 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T16:34:44,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=76, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-11-13T16:34:44,527 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T16:34:44,527 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 76 2024-11-13T16:34:44,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-13T16:34:44,528 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T16:34:44,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742024_1200 (size=440) 2024-11-13T16:34:44,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742024_1200 (size=440) 2024-11-13T16:34:44,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742024_1200 (size=440) 2024-11-13T16:34:44,543 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => fd0f18ffa0f4b7aeeb47ae49f5eb67b4, NAME => 'testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:34:44,543 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 215e247718bad878e8defc94ed34ee43, NAME => 'testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:34:44,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742026_1202 (size=65) 2024-11-13T16:34:44,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742026_1202 (size=65) 2024-11-13T16:34:44,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742026_1202 (size=65) 2024-11-13T16:34:44,563 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:34:44,563 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing fd0f18ffa0f4b7aeeb47ae49f5eb67b4, disabling compactions & flushes 2024-11-13T16:34:44,563 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4. 2024-11-13T16:34:44,563 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4. 2024-11-13T16:34:44,563 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4. after waiting 0 ms 2024-11-13T16:34:44,563 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4. 2024-11-13T16:34:44,563 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4. 
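The CreateTableProcedure entries above record the descriptor for 'testtb-testConsecutiveExports': a single MOB-enabled family 'cf' with MOB_THRESHOLD => '0', one version, and a pre-split at key '1' (two regions, '' -> '1' and '1' -> ''). As a rough, hypothetical client-side sketch of issuing an equivalent request through the public Admin API (not the test's actual code; connection setup is assumed):

    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testConsecutiveExports"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)   // IS_MOB => 'true'
                  .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every value is written as a MOB cell
                  .setMaxVersions(1)     // VERSIONS => '1'
                  .build())
              .build();
          // Pre-split at '1' so two regions are created, matching the STARTKEY/ENDKEY pairs in the log.
          Future<Void> create = admin.createTableAsync(desc, new byte[][] { Bytes.toBytes("1") });
          // The master executes this as a CreateTableProcedure (pid=76 above); the client only waits.
          create.get(60, TimeUnit.SECONDS);
        }
      }
    }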
2024-11-13T16:34:44,563 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for fd0f18ffa0f4b7aeeb47ae49f5eb67b4: Waiting for close lock at 1731515684563Disabling compacts and flushes for region at 1731515684563Disabling writes for close at 1731515684563Writing region close event to WAL at 1731515684563Closed at 1731515684563 2024-11-13T16:34:44,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742025_1201 (size=65) 2024-11-13T16:34:44,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742025_1201 (size=65) 2024-11-13T16:34:44,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742025_1201 (size=65) 2024-11-13T16:34:44,567 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:34:44,567 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing 215e247718bad878e8defc94ed34ee43, disabling compactions & flushes 2024-11-13T16:34:44,567 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43. 2024-11-13T16:34:44,567 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43. 2024-11-13T16:34:44,567 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43. after waiting 0 ms 2024-11-13T16:34:44,567 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43. 2024-11-13T16:34:44,567 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43. 
2024-11-13T16:34:44,567 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for 215e247718bad878e8defc94ed34ee43: Waiting for close lock at 1731515684567Disabling compacts and flushes for region at 1731515684567Disabling writes for close at 1731515684567Writing region close event to WAL at 1731515684567Closed at 1731515684567 2024-11-13T16:34:44,568 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T16:34:44,569 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731515684568"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515684568"}]},"ts":"1731515684568"} 2024-11-13T16:34:44,569 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731515684568"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515684568"}]},"ts":"1731515684568"} 2024-11-13T16:34:44,572 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-13T16:34:44,574 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T16:34:44,574 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515684574"}]},"ts":"1731515684574"} 2024-11-13T16:34:44,577 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-11-13T16:34:44,577 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {f8f85b0bb1d8=0} racks are {/default-rack=0} 2024-11-13T16:34:44,579 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T16:34:44,579 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T16:34:44,579 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T16:34:44,579 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T16:34:44,579 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T16:34:44,580 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T16:34:44,580 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T16:34:44,580 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T16:34:44,580 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T16:34:44,580 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T16:34:44,580 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=fd0f18ffa0f4b7aeeb47ae49f5eb67b4, ASSIGN}, {pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=215e247718bad878e8defc94ed34ee43, ASSIGN}] 2024-11-13T16:34:44,582 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=215e247718bad878e8defc94ed34ee43, ASSIGN 2024-11-13T16:34:44,582 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=fd0f18ffa0f4b7aeeb47ae49f5eb67b4, ASSIGN 2024-11-13T16:34:44,584 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=215e247718bad878e8defc94ed34ee43, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,38523,1731515529292; forceNewPlan=false, retain=false 2024-11-13T16:34:44,584 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=fd0f18ffa0f4b7aeeb47ae49f5eb67b4, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,39619,1731515529105; forceNewPlan=false, retain=false 2024-11-13T16:34:44,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-13T16:34:44,735 INFO [f8f85b0bb1d8:45809 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
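The two TransitRegionStateProcedure ... ASSIGN subprocedures above (pid=77 and pid=78) run entirely on the master; a client only observes their effect once both regions are open. The later hbase.Waiter and HBaseTestingUtil entries suggest the test relies on the testing utility's own wait helpers, so the following is only a minimal illustrative sketch of such a wait, assuming an already-open Connection:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    final class WaitForAssignSketch {
      // Polls until every region of the table is assigned and open, or the timeout elapses.
      static void waitUntilAvailable(Connection conn, String table, long timeoutMs) throws Exception {
        TableName tn = TableName.valueOf(table);
        long deadline = System.currentTimeMillis() + timeoutMs;
        try (Admin admin = conn.getAdmin()) {
          // isTableAvailable returns true only once all regions are assigned and online.
          while (!admin.isTableAvailable(tn)) {
            if (System.currentTimeMillis() > deadline) {
              throw new IllegalStateException("Regions of " + table + " not assigned within " + timeoutMs + " ms");
            }
            Thread.sleep(100);
          }
        }
      }
    }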
2024-11-13T16:34:44,735 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=78 updating hbase:meta row=215e247718bad878e8defc94ed34ee43, regionState=OPENING, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:34:44,735 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=77 updating hbase:meta row=fd0f18ffa0f4b7aeeb47ae49f5eb67b4, regionState=OPENING, regionLocation=f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:34:44,738 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=215e247718bad878e8defc94ed34ee43, ASSIGN because future has completed 2024-11-13T16:34:44,739 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=79, ppid=78, state=RUNNABLE, hasLock=false; OpenRegionProcedure 215e247718bad878e8defc94ed34ee43, server=f8f85b0bb1d8,38523,1731515529292}] 2024-11-13T16:34:44,740 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=fd0f18ffa0f4b7aeeb47ae49f5eb67b4, ASSIGN because future has completed 2024-11-13T16:34:44,740 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=77, state=RUNNABLE, hasLock=false; OpenRegionProcedure fd0f18ffa0f4b7aeeb47ae49f5eb67b4, server=f8f85b0bb1d8,39619,1731515529105}] 2024-11-13T16:34:44,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-13T16:34:44,895 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43. 2024-11-13T16:34:44,896 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(7752): Opening region: {ENCODED => 215e247718bad878e8defc94ed34ee43, NAME => 'testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43.', STARTKEY => '1', ENDKEY => ''} 2024-11-13T16:34:44,896 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43. service=AccessControlService 2024-11-13T16:34:44,896 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-13T16:34:44,896 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 215e247718bad878e8defc94ed34ee43 2024-11-13T16:34:44,896 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:34:44,897 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(7794): checking encryption for 215e247718bad878e8defc94ed34ee43 2024-11-13T16:34:44,897 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(7797): checking classloading for 215e247718bad878e8defc94ed34ee43 2024-11-13T16:34:44,897 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4. 2024-11-13T16:34:44,898 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7752): Opening region: {ENCODED => fd0f18ffa0f4b7aeeb47ae49f5eb67b4, NAME => 'testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4.', STARTKEY => '', ENDKEY => '1'} 2024-11-13T16:34:44,898 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4. service=AccessControlService 2024-11-13T16:34:44,898 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
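The "Registered coprocessor service ... service=AccessControlService" and "System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded" lines above appear because the cluster is configured with the AccessController coprocessor. A minimal configuration sketch of settings that would produce that coprocessor loading follows; this is an assumption for illustration, since the secure mini-cluster in this test wires its security configuration up through its own harness:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    final class SecureClusterConfSketch {
      // Returns a configuration with the AccessController coprocessor enabled on master,
      // regions, and region servers, which is what causes the coprocessor-registration
      // lines above to be logged when a region opens.
      static Configuration secureConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.security.authorization", true);
        String ac = "org.apache.hadoop.hbase.security.access.AccessController";
        conf.set("hbase.coprocessor.master.classes", ac);
        conf.set("hbase.coprocessor.region.classes", ac);
        conf.set("hbase.coprocessor.regionserver.classes", ac);
        return conf;
      }
    }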
2024-11-13T16:34:44,898 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports fd0f18ffa0f4b7aeeb47ae49f5eb67b4 2024-11-13T16:34:44,899 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:34:44,899 INFO [StoreOpener-215e247718bad878e8defc94ed34ee43-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 215e247718bad878e8defc94ed34ee43 2024-11-13T16:34:44,899 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7794): checking encryption for fd0f18ffa0f4b7aeeb47ae49f5eb67b4 2024-11-13T16:34:44,899 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7797): checking classloading for fd0f18ffa0f4b7aeeb47ae49f5eb67b4 2024-11-13T16:34:44,900 INFO [StoreOpener-215e247718bad878e8defc94ed34ee43-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 215e247718bad878e8defc94ed34ee43 columnFamilyName cf 2024-11-13T16:34:44,902 DEBUG [StoreOpener-215e247718bad878e8defc94ed34ee43-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:34:44,902 INFO [StoreOpener-215e247718bad878e8defc94ed34ee43-1 {}] regionserver.HStore(327): Store=215e247718bad878e8defc94ed34ee43/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:34:44,903 INFO [StoreOpener-fd0f18ffa0f4b7aeeb47ae49f5eb67b4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region fd0f18ffa0f4b7aeeb47ae49f5eb67b4 2024-11-13T16:34:44,903 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1038): replaying wal for 215e247718bad878e8defc94ed34ee43 2024-11-13T16:34:44,904 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/215e247718bad878e8defc94ed34ee43 2024-11-13T16:34:44,905 INFO [StoreOpener-fd0f18ffa0f4b7aeeb47ae49f5eb67b4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fd0f18ffa0f4b7aeeb47ae49f5eb67b4 columnFamilyName cf 2024-11-13T16:34:44,905 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/215e247718bad878e8defc94ed34ee43 2024-11-13T16:34:44,905 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1048): stopping wal replay for 215e247718bad878e8defc94ed34ee43 2024-11-13T16:34:44,905 DEBUG [StoreOpener-fd0f18ffa0f4b7aeeb47ae49f5eb67b4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:34:44,905 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1060): Cleaning up temporary data for 215e247718bad878e8defc94ed34ee43 2024-11-13T16:34:44,906 INFO [StoreOpener-fd0f18ffa0f4b7aeeb47ae49f5eb67b4-1 {}] regionserver.HStore(327): Store=fd0f18ffa0f4b7aeeb47ae49f5eb67b4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:34:44,906 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1038): replaying wal for fd0f18ffa0f4b7aeeb47ae49f5eb67b4 2024-11-13T16:34:44,907 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/fd0f18ffa0f4b7aeeb47ae49f5eb67b4 2024-11-13T16:34:44,907 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/fd0f18ffa0f4b7aeeb47ae49f5eb67b4 2024-11-13T16:34:44,908 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1048): stopping wal replay for fd0f18ffa0f4b7aeeb47ae49f5eb67b4 2024-11-13T16:34:44,908 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1060): Cleaning up temporary data for fd0f18ffa0f4b7aeeb47ae49f5eb67b4 2024-11-13T16:34:44,908 DEBUG 
[RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1093): writing seq id for 215e247718bad878e8defc94ed34ee43 2024-11-13T16:34:44,909 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1093): writing seq id for fd0f18ffa0f4b7aeeb47ae49f5eb67b4 2024-11-13T16:34:44,910 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/215e247718bad878e8defc94ed34ee43/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:34:44,911 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1114): Opened 215e247718bad878e8defc94ed34ee43; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62328646, jitterRate=-0.07123079895973206}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:34:44,911 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 215e247718bad878e8defc94ed34ee43 2024-11-13T16:34:44,911 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/fd0f18ffa0f4b7aeeb47ae49f5eb67b4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:34:44,912 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1006): Region open journal for 215e247718bad878e8defc94ed34ee43: Running coprocessor pre-open hook at 1731515684897Writing region info on filesystem at 1731515684897Initializing all the Stores at 1731515684898 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515684898Cleaning up temporary data from old regions at 1731515684905 (+7 ms)Running coprocessor post-open hooks at 1731515684911 (+6 ms)Region opened successfully at 1731515684912 (+1 ms) 2024-11-13T16:34:44,912 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1114): Opened fd0f18ffa0f4b7aeeb47ae49f5eb67b4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62416086, jitterRate=-0.06992784142494202}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:34:44,912 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1122): Running coprocessor post-open hooks for fd0f18ffa0f4b7aeeb47ae49f5eb67b4 2024-11-13T16:34:44,912 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1006): Region open journal for fd0f18ffa0f4b7aeeb47ae49f5eb67b4: Running coprocessor pre-open hook at 
1731515684899Writing region info on filesystem at 1731515684899Initializing all the Stores at 1731515684901 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515684901Cleaning up temporary data from old regions at 1731515684908 (+7 ms)Running coprocessor post-open hooks at 1731515684912 (+4 ms)Region opened successfully at 1731515684912 2024-11-13T16:34:44,915 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4., pid=80, masterSystemTime=1731515684893 2024-11-13T16:34:44,915 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43., pid=79, masterSystemTime=1731515684891 2024-11-13T16:34:44,918 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43. 2024-11-13T16:34:44,918 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43. 2024-11-13T16:34:44,923 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=78 updating hbase:meta row=215e247718bad878e8defc94ed34ee43, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:34:44,924 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=77 updating hbase:meta row=fd0f18ffa0f4b7aeeb47ae49f5eb67b4, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:34:44,931 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4. 2024-11-13T16:34:44,931 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4. 2024-11-13T16:34:44,936 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=79, ppid=78, state=RUNNABLE, hasLock=false; OpenRegionProcedure 215e247718bad878e8defc94ed34ee43, server=f8f85b0bb1d8,38523,1731515529292 because future has completed 2024-11-13T16:34:44,937 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45809 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=f8f85b0bb1d8,38523,1731515529292, table=testtb-testConsecutiveExports, region=215e247718bad878e8defc94ed34ee43. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-11-13T16:34:44,940 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=80, ppid=77, state=RUNNABLE, hasLock=false; OpenRegionProcedure fd0f18ffa0f4b7aeeb47ae49f5eb67b4, server=f8f85b0bb1d8,39619,1731515529105 because future has completed 2024-11-13T16:34:44,958 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=79, resume processing ppid=78 2024-11-13T16:34:44,958 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, ppid=78, state=SUCCESS, hasLock=false; OpenRegionProcedure 215e247718bad878e8defc94ed34ee43, server=f8f85b0bb1d8,38523,1731515529292 in 206 msec 2024-11-13T16:34:44,961 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=80, resume processing ppid=77 2024-11-13T16:34:44,961 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=77, state=SUCCESS, hasLock=false; OpenRegionProcedure fd0f18ffa0f4b7aeeb47ae49f5eb67b4, server=f8f85b0bb1d8,39619,1731515529105 in 208 msec 2024-11-13T16:34:44,964 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=76, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=215e247718bad878e8defc94ed34ee43, ASSIGN in 378 msec 2024-11-13T16:34:44,967 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=77, resume processing ppid=76 2024-11-13T16:34:44,967 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=76, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=fd0f18ffa0f4b7aeeb47ae49f5eb67b4, ASSIGN in 381 msec 2024-11-13T16:34:44,968 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T16:34:44,969 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515684968"}]},"ts":"1731515684968"} 2024-11-13T16:34:44,972 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-11-13T16:34:44,973 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T16:34:44,974 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-11-13T16:34:44,980 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38523 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-13T16:34:45,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:34:45,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:34:45,091 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:34:45,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:34:45,109 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-13T16:34:45,110 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-13T16:34:45,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-13T16:34:45,110 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-13T16:34:45,111 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-13T16:34:45,120 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 595 msec 2024-11-13T16:34:45,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-13T16:34:45,163 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-13T16:34:45,163 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-13T16:34:45,178 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-11-13T16:34:45,178 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4. 
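The access.PermissionStorage and access.ZKPermissionWatcher entries above record a grant of READ/WRITE/EXEC/CREATE/ADMIN ('RWXCA') on the new table to user 'jenkins' being written to the acl table and pushed to every region server through the /hbase/acl znode. A hedged sketch of issuing such a grant from a client via AccessControlClient follows; it is illustrative only and not the test's own code path:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    final class GrantSketch {
      // Grants the full RWXCA set on the table to the given user; the resulting ACL update
      // is what the PermissionStorage / ZKPermissionWatcher entries above show propagating.
      static void grantAll(Connection conn, String table, String user) throws Throwable {
        AccessControlClient.grant(conn, TableName.valueOf(table), user,
            null, null,   // null family/qualifier => permission applies to the whole table
            Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
            Permission.Action.CREATE, Permission.Action.ADMIN);
      }
    }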
2024-11-13T16:34:45,178 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T16:34:45,185 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-13T16:34:45,204 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-13T16:34:45,221 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-13T16:34:45,231 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-13T16:34:45,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731515685231 (current time:1731515685231). 2024-11-13T16:34:45,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T16:34:45,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-13T16:34:45,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T16:34:45,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a921c22, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:34:45,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:34:45,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:34:45,234 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:34:45,234 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:34:45,234 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:34:45,234 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30b56ee2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
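The master.MasterRpcServices(1763) entry above shows the incoming snapshot RPC "{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }". From the client side this corresponds to an Admin.snapshot call; a minimal, hypothetical sketch is shown below (snapshot name and table taken from the log, connection handling assumed):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    final class SnapshotSketch {
      // Takes a flush-type snapshot of the enabled table, then removes it again; the removal
      // is the same RPC recorded by the earlier master.MasterRpcServices(838) "delete name: ..." entries.
      static void snapshotAndClean(Connection conn) throws Exception {
        TableName tn = TableName.valueOf("testtb-testConsecutiveExports");
        try (Admin admin = conn.getAdmin()) {
          admin.snapshot("emptySnaptb0-testConsecutiveExports", tn); // FLUSH snapshot, as in the request above
          admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
        }
      }
    }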
2024-11-13T16:34:45,234 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:34:45,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:34:45,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:34:45,238 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59614, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:34:45,239 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4739c2a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:34:45,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:34:45,240 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:34:45,241 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:34:45,242 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40014, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:34:45,247 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809. 
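The entries around here trace the client bootstrap that precedes the snapshot work: the cluster id is fetched from the connection registry and the hbase:meta location is resolved before any data RPC is sent. The same information is reachable through the public client API; a rough sketch under the usual assumptions (reachable cluster, default configuration):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class ShowClusterIdAndMetaLocation {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin();
         RegionLocator meta = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Cluster id, the same value the log shows arriving via the connection registry.
      System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
      // Where hbase:meta is served, analogous to the "fetched meta region location" entries above.
      HRegionLocation loc = meta.getRegionLocation(Bytes.toBytes(""));
      System.out.println("hbase:meta served by: " + loc.getServerName());
    }
  }
}
```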
2024-11-13T16:34:45,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:34:45,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:34:45,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:34:45,247 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:34:45,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61dd7820, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:34:45,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:34:45,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:34:45,252 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:34:45,252 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:34:45,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:34:45,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e5f8091, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:34:45,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:34:45,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:34:45,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:34:45,255 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59638, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:34:45,256 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@409f680e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:34:45,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:34:45,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:34:45,259 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:34:45,260 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40022, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:34:45,263 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2] 2024-11-13T16:34:45,264 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:34:45,266 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55340, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:34:45,269 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809. 
2024-11-13T16:34:45,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor282.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:34:45,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:34:45,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:34:45,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-13T16:34:45,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-13T16:34:45,272 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
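Before the ACL is copied into the snapshot description, the master reads the table's permissions from hbase:acl, which is the "Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA]" entry above. Client code can inspect the same permissions with AccessControlClient; a hedged sketch, assuming access control is enabled on the cluster as it is in this test:

```java
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ShowTablePermissions {
  // AccessControlClient declares 'throws Throwable', hence the broad signature here.
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      List<UserPermission> perms =
          AccessControlClient.getUserPermissions(conn, "testtb-testConsecutiveExports");
      // In this test setup the expected output is an owner entry similar to "jenkins: RWXCA".
      for (UserPermission p : perms) {
        System.out.println(p);
      }
    }
  }
}
```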
2024-11-13T16:34:45,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=81, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-13T16:34:45,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 81 2024-11-13T16:34:45,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-13T16:34:45,279 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T16:34:45,282 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T16:34:45,289 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T16:34:45,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742027_1203 (size=161) 2024-11-13T16:34:45,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742027_1203 (size=161) 2024-11-13T16:34:45,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742027_1203 (size=161) 2024-11-13T16:34:45,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-13T16:34:45,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-13T16:34:45,724 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T16:34:45,725 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fd0f18ffa0f4b7aeeb47ae49f5eb67b4}, {pid=83, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 215e247718bad878e8defc94ed34ee43}] 2024-11-13T16:34:45,726 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=82, ppid=81, state=RUNNABLE, hasLock=false; 
SnapshotRegionProcedure fd0f18ffa0f4b7aeeb47ae49f5eb67b4 2024-11-13T16:34:45,726 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=83, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 215e247718bad878e8defc94ed34ee43 2024-11-13T16:34:45,879 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38523 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=83 2024-11-13T16:34:45,879 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=82 2024-11-13T16:34:45,880 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43. 2024-11-13T16:34:45,880 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.HRegion(2603): Flush status journal for 215e247718bad878e8defc94ed34ee43: 2024-11-13T16:34:45,880 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43. for emptySnaptb0-testConsecutiveExports completed. 2024-11-13T16:34:45,880 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-13T16:34:45,881 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:34:45,881 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T16:34:45,881 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4. 2024-11-13T16:34:45,881 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.HRegion(2603): Flush status journal for fd0f18ffa0f4b7aeeb47ae49f5eb67b4: 2024-11-13T16:34:45,881 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4. for emptySnaptb0-testConsecutiveExports completed. 2024-11-13T16:34:45,881 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-13T16:34:45,881 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:34:45,882 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T16:34:45,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742028_1204 (size=68) 2024-11-13T16:34:45,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742028_1204 (size=68) 2024-11-13T16:34:45,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742028_1204 (size=68) 2024-11-13T16:34:45,897 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43. 2024-11-13T16:34:45,897 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=83 2024-11-13T16:34:45,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=83 2024-11-13T16:34:45,898 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 215e247718bad878e8defc94ed34ee43 2024-11-13T16:34:45,898 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=83, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 215e247718bad878e8defc94ed34ee43 2024-11-13T16:34:45,901 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=81, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 215e247718bad878e8defc94ed34ee43 in 175 msec 2024-11-13T16:34:45,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742029_1205 (size=68) 2024-11-13T16:34:45,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742029_1205 (size=68) 2024-11-13T16:34:45,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742029_1205 (size=68) 2024-11-13T16:34:45,903 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4. 
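Each snapshot manifest file written above appears in three consecutive "addStoredBlock" entries because the block lands on three distinct datanode ports (43257, 46053, 33001), i.e. the mini-cluster replicates every block three times. If one wanted to verify the replication of such a file directly against HDFS, a hypothetical check could look like the sketch below; the path is illustrative only, not the exact manifest path from this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CheckReplication {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();   // reads core-site.xml / hdfs-site.xml from the classpath
    // Illustrative location under the snapshot working directory; adjust to the actual layout.
    Path manifest =
        new Path("/hbase/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports/region-manifest.example");
    FileSystem fs = FileSystem.get(conf);
    FileStatus st = fs.getFileStatus(manifest);
    System.out.println(manifest + " replication=" + st.getReplication() + " len=" + st.getLen());
  }
}
```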
2024-11-13T16:34:45,903 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-13T16:34:45,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=82 2024-11-13T16:34:45,904 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region fd0f18ffa0f4b7aeeb47ae49f5eb67b4 2024-11-13T16:34:45,904 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=82, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fd0f18ffa0f4b7aeeb47ae49f5eb67b4 2024-11-13T16:34:45,907 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=82, resume processing ppid=81 2024-11-13T16:34:45,907 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=82, ppid=81, state=SUCCESS, hasLock=false; SnapshotRegionProcedure fd0f18ffa0f4b7aeeb47ae49f5eb67b4 in 181 msec 2024-11-13T16:34:45,907 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T16:34:45,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-13T16:34:45,909 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T16:34:45,911 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
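The recurring "Checking to see if procedure is done pid=81" entries are the server side of a client polling for snapshot completion while the SnapshotProcedure works through its states. With the public API that pattern looks roughly like the sketch below; the exact shape of snapshotAsync differs between HBase versions, so treat the call as an assumption rather than a fixed contract.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class PollSnapshotCompletion {
  public static void main(String[] args) throws Exception {
    SnapshotDescription desc = new SnapshotDescription(
        "emptySnaptb0-testConsecutiveExports",
        TableName.valueOf("testtb-testConsecutiveExports"),
        SnapshotType.FLUSH);
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.snapshotAsync(desc);                  // submit the snapshot without blocking
      while (!admin.isSnapshotFinished(desc)) {   // poll, mirroring the repeated "is procedure done" checks
        Thread.sleep(200);
      }
      System.out.println("snapshot finished: " + desc.getName());
    }
  }
}
```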
2024-11-13T16:34:45,911 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T16:34:45,911 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:34:45,911 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-13T16:34:45,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742030_1206 (size=60) 2024-11-13T16:34:45,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742030_1206 (size=60) 2024-11-13T16:34:45,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742030_1206 (size=60) 2024-11-13T16:34:45,931 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T16:34:45,931 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-11-13T16:34:45,932 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-11-13T16:34:45,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742031_1207 (size=641) 2024-11-13T16:34:45,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742031_1207 (size=641) 2024-11-13T16:34:45,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742031_1207 (size=641) 2024-11-13T16:34:45,961 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T16:34:45,971 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T16:34:45,972 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-11-13T16:34:45,975 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T16:34:45,975 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 81 2024-11-13T16:34:45,977 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 704 msec 2024-11-13T16:34:46,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-13T16:34:46,419 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-13T16:34:46,431 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39619 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T16:34:46,434 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38523 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T16:34:46,439 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-13T16:34:46,443 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-11-13T16:34:46,443 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4. 
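The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." entries correspond to client mutations sent with durability SKIP_WAL, presumably to speed up loading the test data. A minimal sketch of issuing such a write; the row, qualifier and value are placeholders rather than the test's actual cells.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPut {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testtb-testConsecutiveExports"))) {
      Put put = new Put(Bytes.toBytes("row-0001"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Skip the write-ahead log: faster, but the edit is lost if the region server crashes
      // before the memstore is flushed, which is exactly what the warning above points out.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
```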
2024-11-13T16:34:46,444 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T16:34:46,446 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-13T16:34:46,464 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-13T16:34:46,487 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-13T16:34:46,492 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-13T16:34:46,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731515686492 (current time:1731515686492). 2024-11-13T16:34:46,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T16:34:46,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-13T16:34:46,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T16:34:46,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40696d31, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:34:46,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:34:46,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:34:46,496 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:34:46,496 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:34:46,496 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:34:46,497 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30f42080, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
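A few entries back the test utility reports "Found 2 regions for table testtb-testConsecutiveExports" before requesting the next snapshot. Outside the test helpers, the same check can be made with the public RegionLocator API; a small assumed sketch:

```java
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class CountRegions {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testConsecutiveExports");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(table)) {
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      System.out.println("Found " + locations.size() + " regions for table " + table);
      for (HRegionLocation loc : locations) {
        System.out.println("  " + loc.getRegion().getRegionNameAsString()
            + " on " + loc.getServerName());
      }
    }
  }
}
```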
2024-11-13T16:34:46,497 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:34:46,497 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:34:46,498 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:34:46,499 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59664, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:34:46,500 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73be762a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:34:46,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:34:46,501 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:34:46,501 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:34:46,503 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40024, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:34:46,504 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809. 
2024-11-13T16:34:46,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:34:46,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:34:46,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:34:46,504 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:34:46,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52cc227e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:34:46,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:34:46,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:34:46,507 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:34:46,507 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:34:46,507 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:34:46,507 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@233b964b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:34:46,507 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:34:46,507 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:34:46,508 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:34:46,509 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59682, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:34:46,512 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2396efd9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:34:46,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:34:46,516 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:34:46,517 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:34:46,521 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40030, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:34:46,527 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2] 2024-11-13T16:34:46,527 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:34:46,529 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55350, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:34:46,530 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809. 
2024-11-13T16:34:46,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor282.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:34:46,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:34:46,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:34:46,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-13T16:34:46,531 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:34:46,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
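"No existing snapshot, attempting snapshot..." is the master confirming that no completed snapshot with the requested name is already registered. From the client side the existing snapshots can be listed before asking for a new one; a brief sketch, with the name pattern chosen only as an example:

```java
import java.util.List;
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListExistingSnapshots {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // List completed snapshots whose names match the suffix used by this test run.
      List<SnapshotDescription> snaps = admin.listSnapshots(Pattern.compile(".*testConsecutiveExports"));
      for (SnapshotDescription s : snaps) {
        System.out.println(s.getName());
      }
    }
  }
}
```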
2024-11-13T16:34:46,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=84, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-13T16:34:46,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 84 2024-11-13T16:34:46,545 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T16:34:46,547 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T16:34:46,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-13T16:34:46,552 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T16:34:46,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742032_1208 (size=156) 2024-11-13T16:34:46,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742032_1208 (size=156) 2024-11-13T16:34:46,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742032_1208 (size=156) 2024-11-13T16:34:46,623 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T16:34:46,623 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=85, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fd0f18ffa0f4b7aeeb47ae49f5eb67b4}, {pid=86, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 215e247718bad878e8defc94ed34ee43}] 2024-11-13T16:34:46,631 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=86, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 215e247718bad878e8defc94ed34ee43 2024-11-13T16:34:46,634 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fd0f18ffa0f4b7aeeb47ae49f5eb67b4 2024-11-13T16:34:46,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-13T16:34:46,785 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38523 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=86 2024-11-13T16:34:46,786 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43. 2024-11-13T16:34:46,786 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegion(2902): Flushing 215e247718bad878e8defc94ed34ee43 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-13T16:34:46,791 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=85 2024-11-13T16:34:46,792 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4. 2024-11-13T16:34:46,792 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegion(2902): Flushing fd0f18ffa0f4b7aeeb47ae49f5eb67b4 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-13T16:34:46,822 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241113a2a352bc2b3e434aa989b616fe28d11d_215e247718bad878e8defc94ed34ee43 is 71, key is 144b150ba2d5780b101f5d7148f28762/cf:q/1731515686434/Put/seqid=0 2024-11-13T16:34:46,845 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111335e5007d0542494cb7dd86c2dbb63051_fd0f18ffa0f4b7aeeb47ae49f5eb67b4 is 71, key is 038e1739dde665b1ecfa686857b9f7dc/cf:q/1731515686431/Put/seqid=0 2024-11-13T16:34:46,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742033_1209 (size=8171) 2024-11-13T16:34:46,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742033_1209 (size=8171) 2024-11-13T16:34:46,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742033_1209 (size=8171) 2024-11-13T16:34:46,871 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:34:46,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-13T16:34:46,883 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241113a2a352bc2b3e434aa989b616fe28d11d_215e247718bad878e8defc94ed34ee43 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241113a2a352bc2b3e434aa989b616fe28d11d_215e247718bad878e8defc94ed34ee43 2024-11-13T16:34:46,891 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/215e247718bad878e8defc94ed34ee43/.tmp/cf/06682032513e4714b102a983bad2f027, store: [table=testtb-testConsecutiveExports family=cf region=215e247718bad878e8defc94ed34ee43] 2024-11-13T16:34:46,892 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/215e247718bad878e8defc94ed34ee43/.tmp/cf/06682032513e4714b102a983bad2f027 is 206, key is 1c45b22c525f8a8a66a68f667e7f54f43/cf:q/1731515686434/Put/seqid=0 2024-11-13T16:34:46,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742034_1210 (size=5101) 2024-11-13T16:34:46,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742034_1210 (size=5101) 2024-11-13T16:34:46,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742034_1210 (size=5101) 2024-11-13T16:34:46,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:34:46,924 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111335e5007d0542494cb7dd86c2dbb63051_fd0f18ffa0f4b7aeeb47ae49f5eb67b4 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e2024111335e5007d0542494cb7dd86c2dbb63051_fd0f18ffa0f4b7aeeb47ae49f5eb67b4 2024-11-13T16:34:46,926 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/fd0f18ffa0f4b7aeeb47ae49f5eb67b4/.tmp/cf/b75ab763fb5f4533ba4bd32c553d04c7, store: [table=testtb-testConsecutiveExports family=cf region=fd0f18ffa0f4b7aeeb47ae49f5eb67b4] 2024-11-13T16:34:46,927 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/fd0f18ffa0f4b7aeeb47ae49f5eb67b4/.tmp/cf/b75ab763fb5f4533ba4bd32c553d04c7 is 206, key is 09239b4448203aed122fec6e9edceb7b1/cf:q/1731515686431/Put/seqid=0 2024-11-13T16:34:46,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742035_1211 (size=14853) 2024-11-13T16:34:46,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742035_1211 (size=14853) 2024-11-13T16:34:46,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742035_1211 (size=14853) 2024-11-13T16:34:46,963 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/215e247718bad878e8defc94ed34ee43/.tmp/cf/06682032513e4714b102a983bad2f027 2024-11-13T16:34:46,979 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/215e247718bad878e8defc94ed34ee43/.tmp/cf/06682032513e4714b102a983bad2f027 as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/215e247718bad878e8defc94ed34ee43/cf/06682032513e4714b102a983bad2f027 2024-11-13T16:34:46,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742036_1212 (size=5904) 2024-11-13T16:34:46,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742036_1212 (size=5904) 2024-11-13T16:34:46,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742036_1212 (size=5904) 2024-11-13T16:34:46,999 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/215e247718bad878e8defc94ed34ee43/cf/06682032513e4714b102a983bad2f027, entries=47, sequenceid=6, filesize=14.5 K 2024-11-13T16:34:47,001 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 215e247718bad878e8defc94ed34ee43 in 215ms, sequenceid=6, compaction requested=false 2024-11-13T16:34:47,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-11-13T16:34:47,002 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegion(2603): Flush status journal for 215e247718bad878e8defc94ed34ee43: 2024-11-13T16:34:47,002 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43. for snaptb0-testConsecutiveExports completed. 2024-11-13T16:34:47,002 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-13T16:34:47,002 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:34:47,002 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/215e247718bad878e8defc94ed34ee43/cf/06682032513e4714b102a983bad2f027] hfiles 2024-11-13T16:34:47,002 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/215e247718bad878e8defc94ed34ee43/cf/06682032513e4714b102a983bad2f027 for snapshot=snaptb0-testConsecutiveExports 2024-11-13T16:34:47,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742037_1213 (size=107) 2024-11-13T16:34:47,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742037_1213 (size=107) 2024-11-13T16:34:47,053 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43. 
2024-11-13T16:34:47,054 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-13T16:34:47,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742037_1213 (size=107) 2024-11-13T16:34:47,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=86 2024-11-13T16:34:47,055 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 215e247718bad878e8defc94ed34ee43 2024-11-13T16:34:47,055 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=86, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 215e247718bad878e8defc94ed34ee43 2024-11-13T16:34:47,058 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=84, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 215e247718bad878e8defc94ed34ee43 in 433 msec 2024-11-13T16:34:47,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-13T16:34:47,397 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/fd0f18ffa0f4b7aeeb47ae49f5eb67b4/.tmp/cf/b75ab763fb5f4533ba4bd32c553d04c7 2024-11-13T16:34:47,404 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/fd0f18ffa0f4b7aeeb47ae49f5eb67b4/.tmp/cf/b75ab763fb5f4533ba4bd32c553d04c7 as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/fd0f18ffa0f4b7aeeb47ae49f5eb67b4/cf/b75ab763fb5f4533ba4bd32c553d04c7 2024-11-13T16:34:47,416 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/fd0f18ffa0f4b7aeeb47ae49f5eb67b4/cf/b75ab763fb5f4533ba4bd32c553d04c7, entries=3, sequenceid=6, filesize=5.8 K 2024-11-13T16:34:47,423 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for fd0f18ffa0f4b7aeeb47ae49f5eb67b4 in 630ms, sequenceid=6, compaction requested=false 2024-11-13T16:34:47,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegion(2603): Flush status journal for fd0f18ffa0f4b7aeeb47ae49f5eb67b4: 2024-11-13T16:34:47,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4. for snaptb0-testConsecutiveExports completed. 2024-11-13T16:34:47,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-13T16:34:47,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:34:47,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/fd0f18ffa0f4b7aeeb47ae49f5eb67b4/cf/b75ab763fb5f4533ba4bd32c553d04c7] hfiles 2024-11-13T16:34:47,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/fd0f18ffa0f4b7aeeb47ae49f5eb67b4/cf/b75ab763fb5f4533ba4bd32c553d04c7 for snapshot=snaptb0-testConsecutiveExports 2024-11-13T16:34:47,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742038_1214 (size=107) 2024-11-13T16:34:47,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742038_1214 (size=107) 2024-11-13T16:34:47,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742038_1214 (size=107) 2024-11-13T16:34:47,496 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4. 
2024-11-13T16:34:47,496 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=85 2024-11-13T16:34:47,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=85 2024-11-13T16:34:47,497 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region fd0f18ffa0f4b7aeeb47ae49f5eb67b4 2024-11-13T16:34:47,497 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fd0f18ffa0f4b7aeeb47ae49f5eb67b4 2024-11-13T16:34:47,512 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=85, resume processing ppid=84 2024-11-13T16:34:47,512 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=84, state=SUCCESS, hasLock=false; SnapshotRegionProcedure fd0f18ffa0f4b7aeeb47ae49f5eb67b4 in 878 msec 2024-11-13T16:34:47,512 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T16:34:47,514 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T16:34:47,522 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
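For context, the per-region flushes and SnapshotRegionProcedure entries above are what a FLUSH-type table snapshot request drives on the region servers. A minimal, hedged client-side sketch (assuming a reachable cluster; only the snapshot and table names are taken from this run, everything else is placeholder):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // A FLUSH snapshot flushes each region's memstore before the
          // per-region manifests are written, matching the flush activity
          // logged above for regions 215e24... and fd0f18...
          admin.snapshot("snaptb0-testConsecutiveExports",
              TableName.valueOf("testtb-testConsecutiveExports"),
              SnapshotType.FLUSH);
        }
      }
    }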
2024-11-13T16:34:47,522 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T16:34:47,522 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:34:47,534 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241113a2a352bc2b3e434aa989b616fe28d11d_215e247718bad878e8defc94ed34ee43, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e2024111335e5007d0542494cb7dd86c2dbb63051_fd0f18ffa0f4b7aeeb47ae49f5eb67b4] hfiles 2024-11-13T16:34:47,534 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241113a2a352bc2b3e434aa989b616fe28d11d_215e247718bad878e8defc94ed34ee43 2024-11-13T16:34:47,534 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e2024111335e5007d0542494cb7dd86c2dbb63051_fd0f18ffa0f4b7aeeb47ae49f5eb67b4 2024-11-13T16:34:47,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742039_1215 (size=291) 2024-11-13T16:34:47,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742039_1215 (size=291) 2024-11-13T16:34:47,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742039_1215 (size=291) 2024-11-13T16:34:47,618 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T16:34:47,618 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-11-13T16:34:47,625 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-13T16:34:47,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-13T16:34:47,713 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables 
on a single HBase RegionServer 2024-11-13T16:34:47,714 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-13T16:34:47,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-13T16:34:47,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742040_1216 (size=951) 2024-11-13T16:34:47,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742040_1216 (size=951) 2024-11-13T16:34:47,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742040_1216 (size=951) 2024-11-13T16:34:47,839 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T16:34:47,875 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T16:34:47,882 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-13T16:34:47,893 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T16:34:47,893 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 84 2024-11-13T16:34:47,897 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 1.3540 sec 2024-11-13T16:34:48,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-13T16:34:48,729 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-13T16:34:48,730 INFO [Time-limited test {}] snapshot.TestExportSnapshot(475): Local export destination path: 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515688729 2024-11-13T16:34:48,730 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515688729, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515688729, srcFsUri=hdfs://localhost:37215, srcDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:34:48,812 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0003_000001 (auth:SIMPLE) from 127.0.0.1:44676 2024-11-13T16:34:48,841 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_2/usercache/jenkins/appcache/application_1731515545832_0003/container_1731515545832_0003_01_000001/launch_container.sh] 2024-11-13T16:34:48,841 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_2/usercache/jenkins/appcache/application_1731515545832_0003/container_1731515545832_0003_01_000001/container_tokens] 2024-11-13T16:34:48,842 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_2/usercache/jenkins/appcache/application_1731515545832_0003/container_1731515545832_0003_01_000001/sysfs] 2024-11-13T16:34:48,847 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:37215, inputRoot=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:34:48,847 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=org.apache.hadoop.fs.LocalFileSystem@1d278bea, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515688729, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515688729/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-13T16:34:48,862 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 
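The ExportSnapshot run that follows copies the snapshot manifest and referenced hfiles from the source HDFS to the local-filesystem target shown above. A hedged sketch of an equivalent programmatic invocation (the destination path is a placeholder, and only the -snapshot, -copy-to and -mappers options are shown):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Export the snapshot taken above to a local file:/// destination,
        // roughly what TestExportSnapshot drives here via the MR-based tool.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testConsecutiveExports",
            "-copy-to", "file:///tmp/local-export",   // placeholder path
            "-mappers", "1"
        });
        System.exit(rc);
      }
    }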
2024-11-13T16:34:48,888 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515688729/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-13T16:34:49,008 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:34:49,009 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:34:49,009 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:34:49,788 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T16:34:50,450 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop-16604603560207056402.jar 2024-11-13T16:34:50,451 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:34:50,451 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:34:50,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop-14988678209987311988.jar 2024-11-13T16:34:50,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:34:50,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:34:50,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:34:50,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:34:50,558 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:34:50,558 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:34:50,559 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-13T16:34:50,560 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-13T16:34:50,561 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-13T16:34:50,561 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-13T16:34:50,561 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-13T16:34:50,562 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-13T16:34:50,562 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-13T16:34:50,562 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-13T16:34:50,562 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For 
class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-13T16:34:50,564 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-13T16:34:50,564 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-13T16:34:50,565 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:34:50,565 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:34:50,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:34:50,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:34:50,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:34:50,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:34:50,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:34:50,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742041_1217 (size=131440) 2024-11-13T16:34:50,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742041_1217 (size=131440) 2024-11-13T16:34:50,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742041_1217 (size=131440) 2024-11-13T16:34:51,039 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742042_1218 (size=4188619) 2024-11-13T16:34:51,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742042_1218 (size=4188619) 2024-11-13T16:34:51,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742042_1218 (size=4188619) 2024-11-13T16:34:51,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742043_1219 (size=1323991) 2024-11-13T16:34:51,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742043_1219 (size=1323991) 2024-11-13T16:34:51,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742043_1219 (size=1323991) 2024-11-13T16:34:51,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742044_1220 (size=903731) 2024-11-13T16:34:51,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742044_1220 (size=903731) 2024-11-13T16:34:51,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742044_1220 (size=903731) 2024-11-13T16:34:52,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742045_1221 (size=8360083) 2024-11-13T16:34:52,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742045_1221 (size=8360083) 2024-11-13T16:34:52,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742045_1221 (size=8360083) 2024-11-13T16:34:52,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742046_1222 (size=1877034) 2024-11-13T16:34:52,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742046_1222 (size=1877034) 2024-11-13T16:34:52,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742046_1222 (size=1877034) 2024-11-13T16:34:52,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742047_1223 (size=77835) 2024-11-13T16:34:52,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742047_1223 (size=77835) 2024-11-13T16:34:52,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742047_1223 (size=77835) 2024-11-13T16:34:52,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742048_1224 (size=30949) 2024-11-13T16:34:52,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742048_1224 (size=30949) 2024-11-13T16:34:52,641 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742048_1224 (size=30949) 2024-11-13T16:34:53,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742049_1225 (size=1597327) 2024-11-13T16:34:53,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742049_1225 (size=1597327) 2024-11-13T16:34:53,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742049_1225 (size=1597327) 2024-11-13T16:34:53,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742050_1226 (size=4695811) 2024-11-13T16:34:53,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742050_1226 (size=4695811) 2024-11-13T16:34:53,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742050_1226 (size=4695811) 2024-11-13T16:34:53,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742051_1227 (size=232957) 2024-11-13T16:34:53,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742051_1227 (size=232957) 2024-11-13T16:34:53,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742051_1227 (size=232957) 2024-11-13T16:34:53,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742052_1228 (size=127628) 2024-11-13T16:34:53,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742052_1228 (size=127628) 2024-11-13T16:34:53,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742052_1228 (size=127628) 2024-11-13T16:34:53,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742053_1229 (size=6424544) 2024-11-13T16:34:53,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742053_1229 (size=6424544) 2024-11-13T16:34:53,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742053_1229 (size=6424544) 2024-11-13T16:34:53,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742054_1230 (size=20406) 2024-11-13T16:34:53,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742054_1230 (size=20406) 2024-11-13T16:34:53,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742054_1230 (size=20406) 2024-11-13T16:34:53,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742055_1231 (size=5175431) 2024-11-13T16:34:53,319 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742055_1231 (size=5175431) 2024-11-13T16:34:53,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742055_1231 (size=5175431) 2024-11-13T16:34:53,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742056_1232 (size=217634) 2024-11-13T16:34:53,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742056_1232 (size=217634) 2024-11-13T16:34:53,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742056_1232 (size=217634) 2024-11-13T16:34:53,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742057_1233 (size=1832290) 2024-11-13T16:34:53,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742057_1233 (size=1832290) 2024-11-13T16:34:53,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742057_1233 (size=1832290) 2024-11-13T16:34:53,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742058_1234 (size=322274) 2024-11-13T16:34:53,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742058_1234 (size=322274) 2024-11-13T16:34:53,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742058_1234 (size=322274) 2024-11-13T16:34:53,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742059_1235 (size=503880) 2024-11-13T16:34:53,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742059_1235 (size=503880) 2024-11-13T16:34:53,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742059_1235 (size=503880) 2024-11-13T16:34:53,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742060_1236 (size=29229) 2024-11-13T16:34:53,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742060_1236 (size=29229) 2024-11-13T16:34:53,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742060_1236 (size=29229) 2024-11-13T16:34:53,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742061_1237 (size=24096) 2024-11-13T16:34:53,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742061_1237 (size=24096) 2024-11-13T16:34:53,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742061_1237 (size=24096) 
2024-11-13T16:34:53,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742062_1238 (size=111872) 2024-11-13T16:34:53,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742062_1238 (size=111872) 2024-11-13T16:34:53,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742062_1238 (size=111872) 2024-11-13T16:34:53,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742063_1239 (size=440392) 2024-11-13T16:34:53,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742063_1239 (size=440392) 2024-11-13T16:34:53,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742063_1239 (size=440392) 2024-11-13T16:34:53,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742064_1240 (size=45609) 2024-11-13T16:34:53,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742064_1240 (size=45609) 2024-11-13T16:34:53,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742064_1240 (size=45609) 2024-11-13T16:34:53,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742065_1241 (size=136454) 2024-11-13T16:34:53,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742065_1241 (size=136454) 2024-11-13T16:34:53,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742065_1241 (size=136454) 2024-11-13T16:34:53,745 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
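The long run of "For class X, using jar Y" lines above is TableMapReduceUtil resolving each required class to its containing jar and shipping it with the export job; the trailing "No job jar file set" warning means the driver jar itself was never registered on the job. A minimal sketch of that step (job name and driver class are assumptions, not taken from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-deps-sketch");
        // Resolves the jar for each required class (HBase client, shaded
        // thirdparty, ZooKeeper, metrics, ...) and adds it to the job's
        // distributed cache -- the source of the DEBUG lines above.
        TableMapReduceUtil.addDependencyJars(job);
        // Setting the driver jar avoids the "No job jar file set" warning.
        job.setJarByClass(DependencyJarsSketch.class);
      }
    }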
2024-11-13T16:34:53,749 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-13T16:34:53,752 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=33.2 K 2024-11-13T16:34:53,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742066_1242 (size=714) 2024-11-13T16:34:53,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742066_1242 (size=714) 2024-11-13T16:34:53,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742066_1242 (size=714) 2024-11-13T16:34:53,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742067_1243 (size=15) 2024-11-13T16:34:53,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742067_1243 (size=15) 2024-11-13T16:34:53,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742067_1243 (size=15) 2024-11-13T16:34:53,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742068_1244 (size=303777) 2024-11-13T16:34:53,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742068_1244 (size=303777) 2024-11-13T16:34:53,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742068_1244 (size=303777) 2024-11-13T16:34:53,913 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T16:34:53,913 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T16:34:54,338 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0004_000001 (auth:SIMPLE) from 127.0.0.1:34556 2024-11-13T16:35:05,907 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
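The capacity-scheduler warnings above ("maximum-am-resource-percent is insufficient to start a single application") refer to the yarn.scheduler.capacity.maximum-am-resource-percent setting, which in this mini cluster is too low to fit even one ApplicationMaster, so enforcement is skipped. A hedged sketch of raising it on a test Configuration (the 0.5 value is an arbitrary example; real clusters set this in capacity-scheduler.xml):

    import org.apache.hadoop.conf.Configuration;

    public class AmResourcePercentSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Fraction of queue capacity that ApplicationMasters may consume;
        // the warning above fires when this cannot accommodate a single AM.
        conf.setFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 0.5f);
        System.out.println(
            conf.get("yarn.scheduler.capacity.maximum-am-resource-percent"));
      }
    }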
2024-11-13T16:35:08,253 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0004_000001 (auth:SIMPLE) from 127.0.0.1:46780 2024-11-13T16:35:08,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742069_1245 (size=349427) 2024-11-13T16:35:08,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742069_1245 (size=349427) 2024-11-13T16:35:08,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742069_1245 (size=349427) 2024-11-13T16:35:10,741 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0004_000001 (auth:SIMPLE) from 127.0.0.1:47474 2024-11-13T16:35:15,646 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 215e247718bad878e8defc94ed34ee43 changed from -1.0 to 0.0, refreshing cache 2024-11-13T16:35:15,646 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region fd0f18ffa0f4b7aeeb47ae49f5eb67b4 changed from -1.0 to 0.0, refreshing cache 2024-11-13T16:35:16,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742070_1246 (size=17451) 2024-11-13T16:35:16,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742070_1246 (size=17451) 2024-11-13T16:35:16,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742070_1246 (size=17451) 2024-11-13T16:35:16,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742071_1247 (size=462) 2024-11-13T16:35:16,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742071_1247 (size=462) 2024-11-13T16:35:16,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742071_1247 (size=462) 2024-11-13T16:35:16,622 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_0/usercache/jenkins/appcache/application_1731515545832_0004/container_1731515545832_0004_01_000002/launch_container.sh] 2024-11-13T16:35:16,622 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_0/usercache/jenkins/appcache/application_1731515545832_0004/container_1731515545832_0004_01_000002/container_tokens] 2024-11-13T16:35:16,622 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_0/usercache/jenkins/appcache/application_1731515545832_0004/container_1731515545832_0004_01_000002/sysfs] 2024-11-13T16:35:16,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742072_1248 (size=17451) 2024-11-13T16:35:16,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742072_1248 (size=17451) 2024-11-13T16:35:16,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742072_1248 (size=17451) 2024-11-13T16:35:16,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742073_1249 (size=349427) 2024-11-13T16:35:16,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742073_1249 (size=349427) 2024-11-13T16:35:16,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742073_1249 (size=349427) 2024-11-13T16:35:16,692 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0004_000001 (auth:SIMPLE) from 127.0.0.1:54840 2024-11-13T16:35:18,334 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-13T16:35:18,334 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
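The "List files in ..." entries that follow simply enumerate the exported snapshot directory on the local filesystem, which should contain at least .snapshotinfo and data.manifest. A small sketch of the same check (the local path is a placeholder for the test's local-export target):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListExportedSnapshotSketch {
      public static void main(String[] args) throws Exception {
        FileSystem localFs = FileSystem.get(URI.create("file:///"), new Configuration());
        // Placeholder for the local-export-<timestamp> target used by the test.
        Path exported = new Path(
            "/tmp/local-export/.hbase-snapshot/snaptb0-testConsecutiveExports");
        for (FileStatus st : localFs.listStatus(exported)) {
          // Expect .snapshotinfo and data.manifest, as logged below.
          System.out.println(st.getPath().getName() + " (" + st.getLen() + " bytes)");
        }
      }
    }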
2024-11-13T16:35:18,339 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testConsecutiveExports 2024-11-13T16:35:18,339 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-13T16:35:18,340 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-13T16:35:18,340 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-315877880_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-13T16:35:18,340 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-13T16:35:18,341 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-13T16:35:18,341 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in org.apache.hadoop.fs.LocalFileSystem@1d278bea in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515688729/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515688729/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-13T16:35:18,341 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515688729/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-13T16:35:18,341 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515688729/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-13T16:35:18,344 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515688729, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515688729, srcFsUri=hdfs://localhost:37215, srcDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:35:18,381 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:37215, inputRoot=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:35:18,381 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=org.apache.hadoop.fs.LocalFileSystem@1d278bea, 
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515688729, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515688729/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-13T16:35:18,384 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-13T16:35:18,393 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515688729/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-13T16:35:18,418 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:35:18,419 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:35:18,419 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:35:19,427 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop-3376783073213180335.jar 2024-11-13T16:35:19,427 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:35:19,427 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:35:19,498 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop-4860320058740135324.jar 2024-11-13T16:35:19,498 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:35:19,499 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:35:19,499 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:35:19,500 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:35:19,500 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:35:19,500 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:35:19,501 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-13T16:35:19,501 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-13T16:35:19,501 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-13T16:35:19,501 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-13T16:35:19,502 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-13T16:35:19,502 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-13T16:35:19,502 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-13T16:35:19,502 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-13T16:35:19,502 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-13T16:35:19,503 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-13T16:35:19,503 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-13T16:35:19,503 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:35:19,503 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:35:19,504 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:35:19,504 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:35:19,504 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:35:19,504 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:35:19,505 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:35:19,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742074_1250 (size=131440) 2024-11-13T16:35:19,581 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742074_1250 (size=131440) 2024-11-13T16:35:19,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742074_1250 (size=131440) 2024-11-13T16:35:19,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742075_1251 (size=4188619) 2024-11-13T16:35:19,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742075_1251 (size=4188619) 2024-11-13T16:35:19,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742075_1251 (size=4188619) 2024-11-13T16:35:19,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742076_1252 (size=440392) 2024-11-13T16:35:19,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742076_1252 (size=440392) 2024-11-13T16:35:19,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742076_1252 (size=440392) 2024-11-13T16:35:19,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742077_1253 (size=1323991) 2024-11-13T16:35:19,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742077_1253 (size=1323991) 2024-11-13T16:35:19,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742077_1253 (size=1323991) 2024-11-13T16:35:19,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742078_1254 (size=903731) 2024-11-13T16:35:19,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742078_1254 (size=903731) 2024-11-13T16:35:19,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742078_1254 (size=903731) 2024-11-13T16:35:19,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742079_1255 (size=8360083) 2024-11-13T16:35:19,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742079_1255 (size=8360083) 2024-11-13T16:35:19,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742079_1255 (size=8360083) 2024-11-13T16:35:19,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742080_1256 (size=1877034) 2024-11-13T16:35:19,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742080_1256 (size=1877034) 2024-11-13T16:35:19,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742080_1256 (size=1877034) 2024-11-13T16:35:19,714 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742081_1257 (size=77835) 2024-11-13T16:35:19,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742081_1257 (size=77835) 2024-11-13T16:35:19,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742081_1257 (size=77835) 2024-11-13T16:35:19,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742082_1258 (size=30949) 2024-11-13T16:35:19,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742082_1258 (size=30949) 2024-11-13T16:35:19,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742082_1258 (size=30949) 2024-11-13T16:35:19,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742083_1259 (size=1597327) 2024-11-13T16:35:19,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742083_1259 (size=1597327) 2024-11-13T16:35:19,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742083_1259 (size=1597327) 2024-11-13T16:35:19,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742084_1260 (size=4695811) 2024-11-13T16:35:19,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742084_1260 (size=4695811) 2024-11-13T16:35:19,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742084_1260 (size=4695811) 2024-11-13T16:35:19,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742085_1261 (size=232957) 2024-11-13T16:35:19,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742085_1261 (size=232957) 2024-11-13T16:35:19,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742085_1261 (size=232957) 2024-11-13T16:35:19,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742086_1262 (size=127628) 2024-11-13T16:35:19,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742086_1262 (size=127628) 2024-11-13T16:35:19,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742086_1262 (size=127628) 2024-11-13T16:35:19,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742087_1263 (size=20406) 2024-11-13T16:35:19,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742087_1263 (size=20406) 2024-11-13T16:35:19,804 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742087_1263 (size=20406) 2024-11-13T16:35:19,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742088_1264 (size=5175431) 2024-11-13T16:35:19,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742088_1264 (size=5175431) 2024-11-13T16:35:19,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742088_1264 (size=5175431) 2024-11-13T16:35:19,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742089_1265 (size=217634) 2024-11-13T16:35:19,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742089_1265 (size=217634) 2024-11-13T16:35:19,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742089_1265 (size=217634) 2024-11-13T16:35:19,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742090_1266 (size=6424544) 2024-11-13T16:35:19,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742090_1266 (size=6424544) 2024-11-13T16:35:19,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742090_1266 (size=6424544) 2024-11-13T16:35:19,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742091_1267 (size=1832290) 2024-11-13T16:35:19,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742091_1267 (size=1832290) 2024-11-13T16:35:19,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742091_1267 (size=1832290) 2024-11-13T16:35:19,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742092_1268 (size=322274) 2024-11-13T16:35:19,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742092_1268 (size=322274) 2024-11-13T16:35:19,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742092_1268 (size=322274) 2024-11-13T16:35:19,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742093_1269 (size=503880) 2024-11-13T16:35:19,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742093_1269 (size=503880) 2024-11-13T16:35:19,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742093_1269 (size=503880) 2024-11-13T16:35:19,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742094_1270 (size=29229) 
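Editor's note: the mapreduce.TableMapReduceUtil(972) DEBUG lines above show each required class being resolved to the jar that contains it, and the blockmanagement.BlockManager INFO lines that follow show those jars being written to HDFS on all three datanodes (127.0.0.1:43257, :46053, :33001) so the job can localize them. A minimal sketch of how that resolution is normally triggered from job-setup code is below; the job name is hypothetical and only public HBase/Hadoop classes named in the log are used — the test's real job is assembled inside ExportSnapshot itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical job name; not taken from the test.
    Job job = Job.getInstance(conf, "dependency-jars-sketch");
    // Resolves each needed class to its containing jar (the "For class X, using jar Y"
    // lines above) and ships those jars with the job via the distributed cache.
    TableMapReduceUtil.addDependencyJars(job);
  }
}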
2024-11-13T16:35:19,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742094_1270 (size=29229) 2024-11-13T16:35:19,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742094_1270 (size=29229) 2024-11-13T16:35:19,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742095_1271 (size=24096) 2024-11-13T16:35:19,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742095_1271 (size=24096) 2024-11-13T16:35:19,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742095_1271 (size=24096) 2024-11-13T16:35:19,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742096_1272 (size=111872) 2024-11-13T16:35:19,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742096_1272 (size=111872) 2024-11-13T16:35:19,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742096_1272 (size=111872) 2024-11-13T16:35:19,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742097_1273 (size=45609) 2024-11-13T16:35:19,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742097_1273 (size=45609) 2024-11-13T16:35:19,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742097_1273 (size=45609) 2024-11-13T16:35:19,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742098_1274 (size=136454) 2024-11-13T16:35:19,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742098_1274 (size=136454) 2024-11-13T16:35:19,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742098_1274 (size=136454) 2024-11-13T16:35:19,982 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
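Editor's note: the JobResourceUploader(481) WARN at the end of the line above ("No job jar file set. User classes may not be found.") is emitted when a job is submitted without Job#setJar(String) or Job#setJarByClass(Class) having been called. In mini-cluster tests this is usually benign because the needed classes are shipped as dependency jars instead. A small illustration of the two calls that silence it follows; the class and path are placeholders, not from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class JobJarSketch {
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "job-jar-sketch"); // hypothetical name
    job.setJarByClass(JobJarSketch.class);  // derive the job jar from a class it contains
    // or point at the jar explicitly:
    // job.setJar("/path/to/my-job.jar");   // placeholder path
  }
}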
2024-11-13T16:35:19,986 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-13T16:35:19,989 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=33.2 K 2024-11-13T16:35:20,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742099_1275 (size=714) 2024-11-13T16:35:20,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742099_1275 (size=714) 2024-11-13T16:35:20,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742099_1275 (size=714) 2024-11-13T16:35:20,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742100_1276 (size=15) 2024-11-13T16:35:20,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742100_1276 (size=15) 2024-11-13T16:35:20,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742100_1276 (size=15) 2024-11-13T16:35:20,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742101_1277 (size=303773) 2024-11-13T16:35:20,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742101_1277 (size=303773) 2024-11-13T16:35:20,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742101_1277 (size=303773) 2024-11-13T16:35:22,787 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T16:35:22,787 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-13T16:35:22,799 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0004_000001 (auth:SIMPLE) from 127.0.0.1:52344 2024-11-13T16:35:22,810 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-1_3/usercache/jenkins/appcache/application_1731515545832_0004/container_1731515545832_0004_01_000001/launch_container.sh] 2024-11-13T16:35:22,811 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-1_3/usercache/jenkins/appcache/application_1731515545832_0004/container_1731515545832_0004_01_000001/container_tokens] 2024-11-13T16:35:22,811 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-1_3/usercache/jenkins/appcache/application_1731515545832_0004/container_1731515545832_0004_01_000001/sysfs] 2024-11-13T16:35:23,653 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0005_000001 (auth:SIMPLE) from 127.0.0.1:54844 2024-11-13T16:35:29,897 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 215e247718bad878e8defc94ed34ee43, had cached 0 bytes from a total of 14853 2024-11-13T16:35:29,899 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region fd0f18ffa0f4b7aeeb47ae49f5eb67b4, had cached 0 bytes from a total of 5904 2024-11-13T16:35:33,475 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0005_000001 (auth:SIMPLE) from 127.0.0.1:41570 2024-11-13T16:35:33,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742102_1278 (size=349423) 2024-11-13T16:35:33,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742102_1278 (size=349423) 2024-11-13T16:35:33,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742102_1278 (size=349423) 2024-11-13T16:35:35,743 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0005_000001 (auth:SIMPLE) from 127.0.0.1:42784 2024-11-13T16:35:35,907 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
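Editor's note: the lines above ("Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list", "export split=0 size=33.2 K", followed by the YARN container activity) are the MapReduce phase of the ExportSnapshot tool that the test drives. As a rough sketch of how the same tool is launched outside the test: ExportSnapshot is a Hadoop Tool, so it can be run through ToolRunner. The snapshot name matches the log; the destination URI is made up, and the option spelling follows the HBase reference guide rather than this test's source.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Roughly equivalent to:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot snaptb0-testConsecutiveExports -copy-to file:///tmp/local-export
    int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(),
        new String[] { "-snapshot", "snaptb0-testConsecutiveExports",
                       "-copy-to", "file:///tmp/local-export" }); // hypothetical target
    System.exit(rc);
  }
}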
2024-11-13T16:35:39,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742103_1279 (size=16925) 2024-11-13T16:35:39,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742103_1279 (size=16925) 2024-11-13T16:35:39,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742103_1279 (size=16925) 2024-11-13T16:35:39,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742104_1280 (size=462) 2024-11-13T16:35:39,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742104_1280 (size=462) 2024-11-13T16:35:39,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742104_1280 (size=462) 2024-11-13T16:35:39,504 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_1/usercache/jenkins/appcache/application_1731515545832_0005/container_1731515545832_0005_01_000002/launch_container.sh] 2024-11-13T16:35:39,504 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_1/usercache/jenkins/appcache/application_1731515545832_0005/container_1731515545832_0005_01_000002/container_tokens] 2024-11-13T16:35:39,505 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_1/usercache/jenkins/appcache/application_1731515545832_0005/container_1731515545832_0005_01_000002/sysfs] 2024-11-13T16:35:39,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742105_1281 (size=16925) 2024-11-13T16:35:39,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742105_1281 (size=16925) 2024-11-13T16:35:39,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742105_1281 (size=16925) 2024-11-13T16:35:39,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742106_1282 (size=349423) 2024-11-13T16:35:39,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742106_1282 (size=349423) 2024-11-13T16:35:39,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742106_1282 (size=349423) 2024-11-13T16:35:41,320 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-13T16:35:41,320 INFO [Time-limited test {}] 
snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-13T16:35:41,340 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testConsecutiveExports 2024-11-13T16:35:41,340 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-13T16:35:41,340 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-13T16:35:41,341 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-315877880_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-13T16:35:41,342 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-13T16:35:41,342 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-13T16:35:41,342 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in org.apache.hadoop.fs.LocalFileSystem@1d278bea in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515688729/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515688729/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-13T16:35:41,343 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515688729/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-13T16:35:41,343 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515688729/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-13T16:35:41,366 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testConsecutiveExports 2024-11-13T16:35:41,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=87, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-11-13T16:35:41,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=87 2024-11-13T16:35:41,375 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515741375"}]},"ts":"1731515741375"} 2024-11-13T16:35:41,380 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, 
state=DISABLING in hbase:meta 2024-11-13T16:35:41,381 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-11-13T16:35:41,383 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-11-13T16:35:41,388 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=fd0f18ffa0f4b7aeeb47ae49f5eb67b4, UNASSIGN}, {pid=90, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=215e247718bad878e8defc94ed34ee43, UNASSIGN}] 2024-11-13T16:35:41,391 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=fd0f18ffa0f4b7aeeb47ae49f5eb67b4, UNASSIGN 2024-11-13T16:35:41,391 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=90, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=215e247718bad878e8defc94ed34ee43, UNASSIGN 2024-11-13T16:35:41,394 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=90 updating hbase:meta row=215e247718bad878e8defc94ed34ee43, regionState=CLOSING, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:35:41,394 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=89 updating hbase:meta row=fd0f18ffa0f4b7aeeb47ae49f5eb67b4, regionState=CLOSING, regionLocation=f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:35:41,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=90, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=215e247718bad878e8defc94ed34ee43, UNASSIGN because future has completed 2024-11-13T16:35:41,398 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T16:35:41,399 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE, hasLock=false; CloseRegionProcedure 215e247718bad878e8defc94ed34ee43, server=f8f85b0bb1d8,38523,1731515529292}] 2024-11-13T16:35:41,407 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=fd0f18ffa0f4b7aeeb47ae49f5eb67b4, UNASSIGN because future has completed 2024-11-13T16:35:41,410 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T16:35:41,411 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=92, ppid=89, state=RUNNABLE, hasLock=false; CloseRegionProcedure fd0f18ffa0f4b7aeeb47ae49f5eb67b4, server=f8f85b0bb1d8,39619,1731515529105}] 2024-11-13T16:35:41,478 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=87 2024-11-13T16:35:41,557 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] handler.UnassignRegionHandler(122): Close 215e247718bad878e8defc94ed34ee43 2024-11-13T16:35:41,557 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T16:35:41,558 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1722): Closing 215e247718bad878e8defc94ed34ee43, disabling compactions & flushes 2024-11-13T16:35:41,558 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43. 2024-11-13T16:35:41,558 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43. 2024-11-13T16:35:41,558 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43. after waiting 0 ms 2024-11-13T16:35:41,558 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43. 2024-11-13T16:35:41,565 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(122): Close fd0f18ffa0f4b7aeeb47ae49f5eb67b4 2024-11-13T16:35:41,566 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T16:35:41,566 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1722): Closing fd0f18ffa0f4b7aeeb47ae49f5eb67b4, disabling compactions & flushes 2024-11-13T16:35:41,566 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4. 2024-11-13T16:35:41,566 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4. 2024-11-13T16:35:41,566 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4. after waiting 0 ms 2024-11-13T16:35:41,566 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4. 
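Editor's note: the procedure chain above (DisableTableProcedure pid=87, CloseTableRegionsProcedure pid=88, then TransitRegionStateProcedure/CloseRegionProcedure pids 89-92 closing regions fd0f18ffa0f4b7aeeb47ae49f5eb67b4 and 215e247718bad878e8defc94ed34ee43) is what the master runs when a client disables the table. A hedged sketch of the client side that triggers it is below; the connection setup is assumed, not copied from the test.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until DisableTableProcedure and its region-close subprocedures finish.
      admin.disableTable(TableName.valueOf("testtb-testConsecutiveExports"));
    }
  }
}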
2024-11-13T16:35:41,570 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/215e247718bad878e8defc94ed34ee43/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T16:35:41,571 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:35:41,571 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43. 2024-11-13T16:35:41,571 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1676): Region close journal for 215e247718bad878e8defc94ed34ee43: Waiting for close lock at 1731515741558Running coprocessor pre-close hooks at 1731515741558Disabling compacts and flushes for region at 1731515741558Disabling writes for close at 1731515741558Writing region close event to WAL at 1731515741563 (+5 ms)Running coprocessor post-close hooks at 1731515741571 (+8 ms)Closed at 1731515741571 2024-11-13T16:35:41,580 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] handler.UnassignRegionHandler(157): Closed 215e247718bad878e8defc94ed34ee43 2024-11-13T16:35:41,586 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=90 updating hbase:meta row=215e247718bad878e8defc94ed34ee43, regionState=CLOSED 2024-11-13T16:35:41,587 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/fd0f18ffa0f4b7aeeb47ae49f5eb67b4/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T16:35:41,588 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:35:41,588 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4. 
2024-11-13T16:35:41,588 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1676): Region close journal for fd0f18ffa0f4b7aeeb47ae49f5eb67b4: Waiting for close lock at 1731515741566Running coprocessor pre-close hooks at 1731515741566Disabling compacts and flushes for region at 1731515741566Disabling writes for close at 1731515741566Writing region close event to WAL at 1731515741567 (+1 ms)Running coprocessor post-close hooks at 1731515741588 (+21 ms)Closed at 1731515741588 2024-11-13T16:35:41,589 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=91, ppid=90, state=RUNNABLE, hasLock=false; CloseRegionProcedure 215e247718bad878e8defc94ed34ee43, server=f8f85b0bb1d8,38523,1731515529292 because future has completed 2024-11-13T16:35:41,591 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(157): Closed fd0f18ffa0f4b7aeeb47ae49f5eb67b4 2024-11-13T16:35:41,592 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=89 updating hbase:meta row=fd0f18ffa0f4b7aeeb47ae49f5eb67b4, regionState=CLOSED 2024-11-13T16:35:41,592 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=91, resume processing ppid=90 2024-11-13T16:35:41,593 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=90, state=SUCCESS, hasLock=false; CloseRegionProcedure 215e247718bad878e8defc94ed34ee43, server=f8f85b0bb1d8,38523,1731515529292 in 191 msec 2024-11-13T16:35:41,594 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=92, ppid=89, state=RUNNABLE, hasLock=false; CloseRegionProcedure fd0f18ffa0f4b7aeeb47ae49f5eb67b4, server=f8f85b0bb1d8,39619,1731515529105 because future has completed 2024-11-13T16:35:41,594 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=88, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=215e247718bad878e8defc94ed34ee43, UNASSIGN in 205 msec 2024-11-13T16:35:41,599 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=92, resume processing ppid=89 2024-11-13T16:35:41,599 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=89, state=SUCCESS, hasLock=false; CloseRegionProcedure fd0f18ffa0f4b7aeeb47ae49f5eb67b4, server=f8f85b0bb1d8,39619,1731515529105 in 185 msec 2024-11-13T16:35:41,602 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=89, resume processing ppid=88 2024-11-13T16:35:41,602 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, ppid=88, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=fd0f18ffa0f4b7aeeb47ae49f5eb67b4, UNASSIGN in 211 msec 2024-11-13T16:35:41,607 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=88, resume processing ppid=87 2024-11-13T16:35:41,607 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, ppid=87, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 221 msec 2024-11-13T16:35:41,610 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515741610"}]},"ts":"1731515741610"} 2024-11-13T16:35:41,613 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-11-13T16:35:41,613 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-11-13T16:35:41,623 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 249 msec 2024-11-13T16:35:41,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=87 2024-11-13T16:35:41,689 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-13T16:35:41,690 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testConsecutiveExports 2024-11-13T16:35:41,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-13T16:35:41,693 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-13T16:35:41,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-11-13T16:35:41,694 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=93, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-13T16:35:41,697 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38523 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-11-13T16:35:41,699 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/fd0f18ffa0f4b7aeeb47ae49f5eb67b4 2024-11-13T16:35:41,702 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/fd0f18ffa0f4b7aeeb47ae49f5eb67b4/cf, FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/fd0f18ffa0f4b7aeeb47ae49f5eb67b4/recovered.edits] 2024-11-13T16:35:41,711 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/215e247718bad878e8defc94ed34ee43 2024-11-13T16:35:41,711 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/fd0f18ffa0f4b7aeeb47ae49f5eb67b4/cf/b75ab763fb5f4533ba4bd32c553d04c7 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testConsecutiveExports/fd0f18ffa0f4b7aeeb47ae49f5eb67b4/cf/b75ab763fb5f4533ba4bd32c553d04c7 2024-11-13T16:35:41,713 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/215e247718bad878e8defc94ed34ee43/cf, FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/215e247718bad878e8defc94ed34ee43/recovered.edits] 2024-11-13T16:35:41,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-13T16:35:41,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-13T16:35:41,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-13T16:35:41,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-13T16:35:41,715 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-13T16:35:41,715 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-13T16:35:41,715 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-13T16:35:41,716 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/fd0f18ffa0f4b7aeeb47ae49f5eb67b4/recovered.edits/9.seqid to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testConsecutiveExports/fd0f18ffa0f4b7aeeb47ae49f5eb67b4/recovered.edits/9.seqid 2024-11-13T16:35:41,720 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/fd0f18ffa0f4b7aeeb47ae49f5eb67b4 2024-11-13T16:35:41,722 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/215e247718bad878e8defc94ed34ee43/cf/06682032513e4714b102a983bad2f027 to 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testConsecutiveExports/215e247718bad878e8defc94ed34ee43/cf/06682032513e4714b102a983bad2f027 2024-11-13T16:35:41,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-13T16:35:41,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-13T16:35:41,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:35:41,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:35:41,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-13T16:35:41,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:35:41,723 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data null 2024-11-13T16:35:41,723 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-13T16:35:41,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:35:41,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=93 2024-11-13T16:35:41,726 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/215e247718bad878e8defc94ed34ee43/recovered.edits/9.seqid to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testConsecutiveExports/215e247718bad878e8defc94ed34ee43/recovered.edits/9.seqid 2024-11-13T16:35:41,727 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testConsecutiveExports/215e247718bad878e8defc94ed34ee43 2024-11-13T16:35:41,727 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-11-13T16:35:41,728 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-11-13T16:35:41,729 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf] 2024-11-13T16:35:41,735 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241113a2a352bc2b3e434aa989b616fe28d11d_215e247718bad878e8defc94ed34ee43 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241113a2a352bc2b3e434aa989b616fe28d11d_215e247718bad878e8defc94ed34ee43 2024-11-13T16:35:41,737 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e2024111335e5007d0542494cb7dd86c2dbb63051_fd0f18ffa0f4b7aeeb47ae49f5eb67b4 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e2024111335e5007d0542494cb7dd86c2dbb63051_fd0f18ffa0f4b7aeeb47ae49f5eb67b4 2024-11-13T16:35:41,738 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-11-13T16:35:41,746 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=93, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-13T16:35:41,750 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-11-13T16:35:41,753 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor. 2024-11-13T16:35:41,755 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=93, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-13T16:35:41,755 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states. 
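Editor's note: once the regions and MOB files are archived, DeleteTableProcedure (pid=93) removes the table from hbase:meta, and the master.MasterRpcServices(838) lines below record the two snapshots being deleted. A minimal client-side counterpart, assumed rather than copied from the test, is sketched here; the table must already be disabled, as it is at this point in the log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CleanupSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Triggers DeleteTableProcedure on the master (pid=93 in the log).
      admin.deleteTable(TableName.valueOf("testtb-testConsecutiveExports"));
      // Matches the "delete name: ..." snapshot deletions recorded below.
      admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
      admin.deleteSnapshot("snaptb0-testConsecutiveExports");
    }
  }
}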
2024-11-13T16:35:41,756 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731515741755"}]},"ts":"9223372036854775807"} 2024-11-13T16:35:41,756 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731515741755"}]},"ts":"9223372036854775807"} 2024-11-13T16:35:41,759 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-13T16:35:41,759 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => fd0f18ffa0f4b7aeeb47ae49f5eb67b4, NAME => 'testtb-testConsecutiveExports,,1731515684515.fd0f18ffa0f4b7aeeb47ae49f5eb67b4.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 215e247718bad878e8defc94ed34ee43, NAME => 'testtb-testConsecutiveExports,1,1731515684515.215e247718bad878e8defc94ed34ee43.', STARTKEY => '1', ENDKEY => ''}] 2024-11-13T16:35:41,759 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted. 2024-11-13T16:35:41,760 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731515741759"}]},"ts":"9223372036854775807"} 2024-11-13T16:35:41,762 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-11-13T16:35:41,764 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=93, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-13T16:35:41,766 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 74 msec 2024-11-13T16:35:41,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=93 2024-11-13T16:35:41,829 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-11-13T16:35:41,829 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-13T16:35:41,849 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-11-13T16:35:41,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-11-13T16:35:41,868 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-11-13T16:35:41,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-11-13T16:35:41,932 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=794 (was 
791) Potentially hanging thread: MoveIntermediateToDone Thread #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (565235355) connection to localhost/127.0.0.1:37729 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_246402555_1 at /127.0.0.1:47858 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-315877880_22 at /127.0.0.1:47886 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-4154 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: MoveIntermediateToDone Thread #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-315877880_22 at /127.0.0.1:39968 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 96048) 
java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37729 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-315877880_22 at /127.0.0.1:59934 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? 
-, OpenFileDescriptor=796 (was 803), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1436 (was 1826), ProcessCount=19 (was 19), AvailableMemoryMB=1107 (was 1785) 2024-11-13T16:35:41,932 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=794 is superior to 500 2024-11-13T16:35:41,983 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=794, OpenFileDescriptor=798, MaxFileDescriptor=1048576, SystemLoadAverage=1436, ProcessCount=19, AvailableMemoryMB=1106 2024-11-13T16:35:41,983 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=794 is superior to 500 2024-11-13T16:35:41,991 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T16:35:41,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:35:42,000 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T16:35:42,002 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 94 2024-11-13T16:35:42,003 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T16:35:42,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-13T16:35:42,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742107_1283 (size=458) 2024-11-13T16:35:42,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742107_1283 (size=458) 2024-11-13T16:35:42,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742107_1283 (size=458) 2024-11-13T16:35:42,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-13T16:35:42,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-13T16:35:42,545 INFO 
[RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 0b6fd4461bcb296079228f893028756c, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:35:42,545 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d8bcb5a49f7b9b9ccf99cda1f05a4ae8, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:35:42,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742109_1285 (size=83) 2024-11-13T16:35:42,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742109_1285 (size=83) 2024-11-13T16:35:42,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742109_1285 (size=83) 2024-11-13T16:35:42,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742108_1284 (size=83) 2024-11-13T16:35:42,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742108_1284 (size=83) 2024-11-13T16:35:42,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742108_1284 (size=83) 2024-11-13T16:35:42,574 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:35:42,574 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c.; 
StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:35:42,575 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing d8bcb5a49f7b9b9ccf99cda1f05a4ae8, disabling compactions & flushes 2024-11-13T16:35:42,575 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing 0b6fd4461bcb296079228f893028756c, disabling compactions & flushes 2024-11-13T16:35:42,575 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8. 2024-11-13T16:35:42,575 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c. 2024-11-13T16:35:42,575 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8. 2024-11-13T16:35:42,575 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c. 2024-11-13T16:35:42,575 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8. after waiting 0 ms 2024-11-13T16:35:42,575 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c. after waiting 0 ms 2024-11-13T16:35:42,575 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8. 2024-11-13T16:35:42,575 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c. 2024-11-13T16:35:42,575 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8. 2024-11-13T16:35:42,575 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c. 
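The CreateTableProcedure above (pid=94) lays out two regions split at '1' for a MOB-enabled 'cf' family. A sketch of the corresponding client call, assuming the HBase 2.x descriptor builders -- the table name, family name, and MOB attributes are copied from the descriptor printed in the log, the rest is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
      // Family 'cf' with IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1',
      // matching the descriptor printed in the create-table log record above.
      TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(tn)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setMaxVersions(1)
              .build());
      // One split key produces two regions, ('', '1') and ('1', ''), as in the log.
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(table.build(), splitKeys);
    }
  }
}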
2024-11-13T16:35:42,575 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 0b6fd4461bcb296079228f893028756c: Waiting for close lock at 1731515742575Disabling compacts and flushes for region at 1731515742575Disabling writes for close at 1731515742575Writing region close event to WAL at 1731515742575Closed at 1731515742575 2024-11-13T16:35:42,575 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for d8bcb5a49f7b9b9ccf99cda1f05a4ae8: Waiting for close lock at 1731515742575Disabling compacts and flushes for region at 1731515742575Disabling writes for close at 1731515742575Writing region close event to WAL at 1731515742575Closed at 1731515742575 2024-11-13T16:35:42,579 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T16:35:42,579 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1731515742579"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515742579"}]},"ts":"1731515742579"} 2024-11-13T16:35:42,579 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1731515742579"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515742579"}]},"ts":"1731515742579"} 2024-11-13T16:35:42,583 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-11-13T16:35:42,584 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T16:35:42,584 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515742584"}]},"ts":"1731515742584"} 2024-11-13T16:35:42,587 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-11-13T16:35:42,588 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {f8f85b0bb1d8=0} racks are {/default-rack=0} 2024-11-13T16:35:42,595 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T16:35:42,595 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T16:35:42,595 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T16:35:42,595 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T16:35:42,595 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T16:35:42,595 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T16:35:42,595 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T16:35:42,595 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T16:35:42,595 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T16:35:42,595 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T16:35:42,596 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=d8bcb5a49f7b9b9ccf99cda1f05a4ae8, ASSIGN}, {pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0b6fd4461bcb296079228f893028756c, ASSIGN}] 2024-11-13T16:35:42,599 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=d8bcb5a49f7b9b9ccf99cda1f05a4ae8, ASSIGN 2024-11-13T16:35:42,600 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0b6fd4461bcb296079228f893028756c, ASSIGN 2024-11-13T16:35:42,601 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=d8bcb5a49f7b9b9ccf99cda1f05a4ae8, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,38437,1731515528789; forceNewPlan=false, retain=false 
2024-11-13T16:35:42,602 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0b6fd4461bcb296079228f893028756c, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,38523,1731515529292; forceNewPlan=false, retain=false 2024-11-13T16:35:42,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-13T16:35:42,752 INFO [f8f85b0bb1d8:45809 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-13T16:35:42,752 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=95 updating hbase:meta row=d8bcb5a49f7b9b9ccf99cda1f05a4ae8, regionState=OPENING, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:35:42,752 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=96 updating hbase:meta row=0b6fd4461bcb296079228f893028756c, regionState=OPENING, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:35:42,755 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=d8bcb5a49f7b9b9ccf99cda1f05a4ae8, ASSIGN because future has completed 2024-11-13T16:35:42,755 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=95, state=RUNNABLE, hasLock=false; OpenRegionProcedure d8bcb5a49f7b9b9ccf99cda1f05a4ae8, server=f8f85b0bb1d8,38437,1731515528789}] 2024-11-13T16:35:42,757 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0b6fd4461bcb296079228f893028756c, ASSIGN because future has completed 2024-11-13T16:35:42,757 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=98, ppid=96, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0b6fd4461bcb296079228f893028756c, server=f8f85b0bb1d8,38523,1731515529292}] 2024-11-13T16:35:42,927 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8. 2024-11-13T16:35:42,927 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7752): Opening region: {ENCODED => d8bcb5a49f7b9b9ccf99cda1f05a4ae8, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8.', STARTKEY => '', ENDKEY => '1'} 2024-11-13T16:35:42,928 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8. 
service=AccessControlService 2024-11-13T16:35:42,928 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c. 2024-11-13T16:35:42,929 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7752): Opening region: {ENCODED => 0b6fd4461bcb296079228f893028756c, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c.', STARTKEY => '1', ENDKEY => ''} 2024-11-13T16:35:42,929 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T16:35:42,929 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion d8bcb5a49f7b9b9ccf99cda1f05a4ae8 2024-11-13T16:35:42,929 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c. service=AccessControlService 2024-11-13T16:35:42,929 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:35:42,929 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7794): checking encryption for d8bcb5a49f7b9b9ccf99cda1f05a4ae8 2024-11-13T16:35:42,929 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
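The "Registered coprocessor service: ... service=AccessControlService" records show the AccessController coprocessor being attached to each region as it opens. In a secure test setup this is typically wired through configuration; a hedged sketch using the standard coprocessor keys (the exact configuration this test cluster uses is not shown in the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.access.AccessController;

public class SecureClusterConf {
  // Returns a configuration with authorization enabled and the AccessController
  // registered on the master, the region servers, and every region. With this in
  // place, each region open logs the AccessControlService registration seen above.
  public static Configuration secured() {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.security.authorization", true);
    conf.set("hbase.coprocessor.master.classes", AccessController.class.getName());
    conf.set("hbase.coprocessor.region.classes", AccessController.class.getName());
    conf.set("hbase.coprocessor.regionserver.classes", AccessController.class.getName());
    return conf;
  }
}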
2024-11-13T16:35:42,929 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7797): checking classloading for d8bcb5a49f7b9b9ccf99cda1f05a4ae8 2024-11-13T16:35:42,929 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 0b6fd4461bcb296079228f893028756c 2024-11-13T16:35:42,929 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:35:42,929 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7794): checking encryption for 0b6fd4461bcb296079228f893028756c 2024-11-13T16:35:42,929 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7797): checking classloading for 0b6fd4461bcb296079228f893028756c 2024-11-13T16:35:42,933 INFO [StoreOpener-d8bcb5a49f7b9b9ccf99cda1f05a4ae8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d8bcb5a49f7b9b9ccf99cda1f05a4ae8 2024-11-13T16:35:42,934 INFO [StoreOpener-0b6fd4461bcb296079228f893028756c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0b6fd4461bcb296079228f893028756c 2024-11-13T16:35:42,937 INFO [StoreOpener-0b6fd4461bcb296079228f893028756c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0b6fd4461bcb296079228f893028756c columnFamilyName cf 2024-11-13T16:35:42,938 DEBUG [StoreOpener-0b6fd4461bcb296079228f893028756c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:35:42,939 INFO [StoreOpener-0b6fd4461bcb296079228f893028756c-1 {}] regionserver.HStore(327): Store=0b6fd4461bcb296079228f893028756c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:35:42,941 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1038): replaying wal for 0b6fd4461bcb296079228f893028756c 2024-11-13T16:35:42,942 DEBUG 
[RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b6fd4461bcb296079228f893028756c 2024-11-13T16:35:42,943 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b6fd4461bcb296079228f893028756c 2024-11-13T16:35:42,944 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1048): stopping wal replay for 0b6fd4461bcb296079228f893028756c 2024-11-13T16:35:42,944 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1060): Cleaning up temporary data for 0b6fd4461bcb296079228f893028756c 2024-11-13T16:35:42,946 INFO [StoreOpener-d8bcb5a49f7b9b9ccf99cda1f05a4ae8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d8bcb5a49f7b9b9ccf99cda1f05a4ae8 columnFamilyName cf 2024-11-13T16:35:42,947 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1093): writing seq id for 0b6fd4461bcb296079228f893028756c 2024-11-13T16:35:42,951 DEBUG [StoreOpener-d8bcb5a49f7b9b9ccf99cda1f05a4ae8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:35:42,951 INFO [StoreOpener-d8bcb5a49f7b9b9ccf99cda1f05a4ae8-1 {}] regionserver.HStore(327): Store=d8bcb5a49f7b9b9ccf99cda1f05a4ae8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:35:42,951 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b6fd4461bcb296079228f893028756c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:35:42,952 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1038): replaying wal for d8bcb5a49f7b9b9ccf99cda1f05a4ae8 2024-11-13T16:35:42,952 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1114): Opened 0b6fd4461bcb296079228f893028756c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65259577, 
jitterRate=-0.02755652368068695}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:35:42,952 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0b6fd4461bcb296079228f893028756c 2024-11-13T16:35:42,953 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1006): Region open journal for 0b6fd4461bcb296079228f893028756c: Running coprocessor pre-open hook at 1731515742930Writing region info on filesystem at 1731515742930Initializing all the Stores at 1731515742932 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515742932Cleaning up temporary data from old regions at 1731515742944 (+12 ms)Running coprocessor post-open hooks at 1731515742952 (+8 ms)Region opened successfully at 1731515742953 (+1 ms) 2024-11-13T16:35:42,955 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/d8bcb5a49f7b9b9ccf99cda1f05a4ae8 2024-11-13T16:35:42,955 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c., pid=98, masterSystemTime=1731515742910 2024-11-13T16:35:42,955 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/d8bcb5a49f7b9b9ccf99cda1f05a4ae8 2024-11-13T16:35:42,956 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1048): stopping wal replay for d8bcb5a49f7b9b9ccf99cda1f05a4ae8 2024-11-13T16:35:42,956 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1060): Cleaning up temporary data for d8bcb5a49f7b9b9ccf99cda1f05a4ae8 2024-11-13T16:35:42,965 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1093): writing seq id for d8bcb5a49f7b9b9ccf99cda1f05a4ae8 2024-11-13T16:35:42,965 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c. 2024-11-13T16:35:42,965 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c. 
2024-11-13T16:35:42,967 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=96 updating hbase:meta row=0b6fd4461bcb296079228f893028756c, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:35:42,967 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/d8bcb5a49f7b9b9ccf99cda1f05a4ae8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:35:42,970 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=96, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0b6fd4461bcb296079228f893028756c, server=f8f85b0bb1d8,38523,1731515529292 because future has completed 2024-11-13T16:35:42,970 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1114): Opened d8bcb5a49f7b9b9ccf99cda1f05a4ae8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75112763, jitterRate=0.11926738917827606}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:35:42,970 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d8bcb5a49f7b9b9ccf99cda1f05a4ae8 2024-11-13T16:35:42,970 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1006): Region open journal for d8bcb5a49f7b9b9ccf99cda1f05a4ae8: Running coprocessor pre-open hook at 1731515742929Writing region info on filesystem at 1731515742929Initializing all the Stores at 1731515742932 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515742932Cleaning up temporary data from old regions at 1731515742957 (+25 ms)Running coprocessor post-open hooks at 1731515742970 (+13 ms)Region opened successfully at 1731515742970 2024-11-13T16:35:42,971 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8., pid=97, masterSystemTime=1731515742908 2024-11-13T16:35:42,982 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8. 2024-11-13T16:35:42,982 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8. 
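Once the two OpenRegionProcedures (pid=97 and pid=98) finish, the table's regions are online and their OPEN state is recorded in hbase:meta. A small sketch that lists them from a client, assuming the 2.x Admin API; purely illustrative, not part of the test source:

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class ShowRegions {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
      // Expected output here: two regions, one spanning ['', '1') and one spanning ['1', ''),
      // matching the assignments logged above.
      List<RegionInfo> regions = admin.getRegions(tn);
      for (RegionInfo ri : regions) {
        System.out.println(ri.getEncodedName()
            + " [" + Bytes.toStringBinary(ri.getStartKey())
            + ", " + Bytes.toStringBinary(ri.getEndKey()) + ")");
      }
    }
  }
}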
2024-11-13T16:35:42,987 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=95 updating hbase:meta row=d8bcb5a49f7b9b9ccf99cda1f05a4ae8, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:35:42,989 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=98, resume processing ppid=96 2024-11-13T16:35:42,989 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=96, state=SUCCESS, hasLock=false; OpenRegionProcedure 0b6fd4461bcb296079228f893028756c, server=f8f85b0bb1d8,38523,1731515529292 in 223 msec 2024-11-13T16:35:42,991 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=95, state=RUNNABLE, hasLock=false; OpenRegionProcedure d8bcb5a49f7b9b9ccf99cda1f05a4ae8, server=f8f85b0bb1d8,38437,1731515528789 because future has completed 2024-11-13T16:35:42,994 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, ppid=94, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0b6fd4461bcb296079228f893028756c, ASSIGN in 394 msec 2024-11-13T16:35:43,013 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=97, resume processing ppid=95 2024-11-13T16:35:43,013 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=95, state=SUCCESS, hasLock=false; OpenRegionProcedure d8bcb5a49f7b9b9ccf99cda1f05a4ae8, server=f8f85b0bb1d8,38437,1731515528789 in 244 msec 2024-11-13T16:35:43,017 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=95, resume processing ppid=94 2024-11-13T16:35:43,017 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, ppid=94, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=d8bcb5a49f7b9b9ccf99cda1f05a4ae8, ASSIGN in 418 msec 2024-11-13T16:35:43,018 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T16:35:43,018 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515743018"}]},"ts":"1731515743018"} 2024-11-13T16:35:43,027 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-11-13T16:35:43,029 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T16:35:43,030 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-11-13T16:35:43,038 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38523 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-13T16:35:43,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:35:43,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:35:43,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:35:43,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:35:43,111 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-11-13T16:35:43,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:35:43,133 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-13T16:35:43,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-13T16:35:43,151 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-13T16:35:43,153 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 1.1590 sec 2024-11-13T16:35:43,154 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-13T16:35:43,154 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-13T16:35:44,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-13T16:35:44,158 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-13T16:35:44,159 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at 
row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-13T16:35:44,162 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:35:44,162 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8. 2024-11-13T16:35:44,162 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T16:35:44,164 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-13T16:35:44,169 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-13T16:35:44,175 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-13T16:35:44,178 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-13T16:35:44,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731515744178 (current time:1731515744178). 
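The "snapshot request for:{ ss=emptySnaptb0-... type=FLUSH ttl=0 }" entry above is what the master logs when a client issues an Admin snapshot call. A minimal sketch of that client side, assuming the standard HBase 2.x Admin API (this is not the logged test code itself):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotRequestSketch {  // illustrative name, not from the log
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Synchronous snapshot of an enabled table; the master takes a FLUSH-type
      // snapshot, which is what the "type=FLUSH ttl=0" request above reflects.
      admin.snapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
    }
  }
}

The SnapshotProcedure states that follow (SNAPSHOT_PREPARE, SNAPSHOT_PRE_OPERATION, SNAPSHOT_WRITE_SNAPSHOT_INFO, ...) are the master-side execution of that single client call.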
2024-11-13T16:35:44,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T16:35:44,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-13T16:35:44,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T16:35:44,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4175978f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:35:44,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:35:44,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:35:44,181 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:35:44,181 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:35:44,181 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:35:44,181 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fe640b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:35:44,181 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:35:44,181 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:35:44,182 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:35:44,183 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38180, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:35:44,184 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e3ccf6c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:35:44,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:35:44,185 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:35:44,185 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:35:44,186 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45678, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:35:44,188 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809. 2024-11-13T16:35:44,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:35:44,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:35:44,188 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-13T16:35:44,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:35:44,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bfbcca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:35:44,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:35:44,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:35:44,190 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:35:44,190 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:35:44,190 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:35:44,191 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a4356ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:35:44,191 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:35:44,191 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:35:44,191 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:35:44,192 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38202, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:35:44,192 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cba6815, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:35:44,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:35:44,194 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:35:44,194 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:35:44,195 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45682, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:35:44,196 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2] 2024-11-13T16:35:44,197 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:35:44,198 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38460, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:35:44,199 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809. 2024-11-13T16:35:44,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor282.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:35:44,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:35:44,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:35:44,199 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:35:44,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-13T16:35:44,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-13T16:35:44,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=99, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-13T16:35:44,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 99 2024-11-13T16:35:44,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=99 2024-11-13T16:35:44,202 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T16:35:44,203 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T16:35:44,206 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T16:35:44,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742110_1286 (size=215) 2024-11-13T16:35:44,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742110_1286 (size=215) 2024-11-13T16:35:44,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742110_1286 (size=215) 2024-11-13T16:35:44,217 INFO [PEWorker-1 {}] 
procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T16:35:44,217 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d8bcb5a49f7b9b9ccf99cda1f05a4ae8}, {pid=101, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0b6fd4461bcb296079228f893028756c}] 2024-11-13T16:35:44,218 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=101, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0b6fd4461bcb296079228f893028756c 2024-11-13T16:35:44,219 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=100, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d8bcb5a49f7b9b9ccf99cda1f05a4ae8 2024-11-13T16:35:44,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=99 2024-11-13T16:35:44,370 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38523 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=101 2024-11-13T16:35:44,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c. 2024-11-13T16:35:44,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.HRegion(2603): Flush status journal for 0b6fd4461bcb296079228f893028756c: 2024-11-13T16:35:44,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-13T16:35:44,371 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38437 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=100 2024-11-13T16:35:44,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8. 2024-11-13T16:35:44,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T16:35:44,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:35:44,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T16:35:44,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.HRegion(2603): Flush status journal for d8bcb5a49f7b9b9ccf99cda1f05a4ae8: 2024-11-13T16:35:44,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-13T16:35:44,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T16:35:44,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:35:44,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T16:35:44,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742111_1287 (size=86) 2024-11-13T16:35:44,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742111_1287 (size=86) 2024-11-13T16:35:44,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742111_1287 (size=86) 2024-11-13T16:35:44,395 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c. 
2024-11-13T16:35:44,395 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=101 2024-11-13T16:35:44,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=101 2024-11-13T16:35:44,396 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 0b6fd4461bcb296079228f893028756c 2024-11-13T16:35:44,396 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=101, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0b6fd4461bcb296079228f893028756c 2024-11-13T16:35:44,399 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, ppid=99, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0b6fd4461bcb296079228f893028756c in 181 msec 2024-11-13T16:35:44,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742112_1288 (size=86) 2024-11-13T16:35:44,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742112_1288 (size=86) 2024-11-13T16:35:44,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742112_1288 (size=86) 2024-11-13T16:35:44,407 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8. 
2024-11-13T16:35:44,407 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=100 2024-11-13T16:35:44,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=100 2024-11-13T16:35:44,408 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region d8bcb5a49f7b9b9ccf99cda1f05a4ae8 2024-11-13T16:35:44,408 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=100, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d8bcb5a49f7b9b9ccf99cda1f05a4ae8 2024-11-13T16:35:44,411 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=100, resume processing ppid=99 2024-11-13T16:35:44,411 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T16:35:44,411 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=99, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d8bcb5a49f7b9b9ccf99cda1f05a4ae8 in 192 msec 2024-11-13T16:35:44,412 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T16:35:44,413 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-13T16:35:44,413 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T16:35:44,413 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:35:44,414 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-13T16:35:44,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742113_1289 (size=78) 2024-11-13T16:35:44,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742113_1289 (size=78) 2024-11-13T16:35:44,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742113_1289 (size=78) 2024-11-13T16:35:44,424 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T16:35:44,424 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T16:35:44,424 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T16:35:44,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742114_1290 (size=713) 2024-11-13T16:35:44,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742114_1290 (size=713) 2024-11-13T16:35:44,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742114_1290 (size=713) 2024-11-13T16:35:44,436 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T16:35:44,441 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T16:35:44,442 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T16:35:44,443 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T16:35:44,443 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 99 2024-11-13T16:35:44,449 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 243 msec 2024-11-13T16:35:44,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=99 2024-11-13T16:35:44,523 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-13T16:35:44,536 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38437 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T16:35:44,538 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38523 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T16:35:44,541 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-13T16:35:44,546 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:35:44,546 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8. 
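After the "Operation: SNAPSHOT ... completed" entry above, the snapshot manifest has been moved out of .hbase-snapshot/.tmp into .hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion, at which point it is visible to clients. A hedged sketch of confirming that from the client, using the standard Admin.listSnapshots() API (illustrative code, not part of the test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {  // illustrative name, not from the log
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Lists completed snapshots, i.e. those whose manifests sit under .hbase-snapshot/.
      for (SnapshotDescription sd : admin.listSnapshots()) {
        System.out.println(sd.getName() + " table=" + sd.getTableName());
      }
    }
  }
}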
2024-11-13T16:35:44,547 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T16:35:44,549 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-13T16:35:44,557 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-13T16:35:44,569 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-13T16:35:44,573 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-13T16:35:44,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731515744573 (current time:1731515744573). 2024-11-13T16:35:44,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T16:35:44,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-13T16:35:44,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T16:35:44,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@378d0216, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:35:44,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:35:44,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:35:44,576 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:35:44,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:35:44,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:35:44,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@299b6da3, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:35:44,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:35:44,577 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:35:44,577 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:35:44,578 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38218, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:35:44,579 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ac086cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:35:44,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:35:44,580 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:35:44,580 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:35:44,584 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45696, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:35:44,586 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809. 
2024-11-13T16:35:44,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:35:44,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:35:44,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:35:44,586 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:35:44,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@458042a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:35:44,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:35:44,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:35:44,588 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:35:44,588 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:35:44,588 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:35:44,588 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71699cd9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:35:44,588 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:35:44,588 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:35:44,589 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:35:44,589 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38226, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:35:44,590 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c36d5c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:35:44,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:35:44,593 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:35:44,593 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:35:44,594 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45702, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:35:44,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2] 2024-11-13T16:35:44,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:35:44,599 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38474, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:35:44,600 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809. 
2024-11-13T16:35:44,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor282.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:35:44,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:35:44,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:35:44,601 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:35:44,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-13T16:35:44,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-13T16:35:44,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=102, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-13T16:35:44,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 102 2024-11-13T16:35:44,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-13T16:35:44,606 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T16:35:44,607 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T16:35:44,612 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T16:35:44,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742115_1291 (size=210) 2024-11-13T16:35:44,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742115_1291 (size=210) 2024-11-13T16:35:44,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742115_1291 (size=210) 2024-11-13T16:35:44,633 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T16:35:44,633 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d8bcb5a49f7b9b9ccf99cda1f05a4ae8}, {pid=104, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0b6fd4461bcb296079228f893028756c}] 2024-11-13T16:35:44,634 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=104, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0b6fd4461bcb296079228f893028756c 2024-11-13T16:35:44,634 INFO [PEWorker-4 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d8bcb5a49f7b9b9ccf99cda1f05a4ae8 2024-11-13T16:35:44,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-13T16:35:44,786 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38523 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=104 2024-11-13T16:35:44,786 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38437 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-11-13T16:35:44,786 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c. 2024-11-13T16:35:44,786 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8. 2024-11-13T16:35:44,786 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2902): Flushing d8bcb5a49f7b9b9ccf99cda1f05a4ae8 1/1 column families, dataSize=65 B heapSize=400 B 2024-11-13T16:35:44,786 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegion(2902): Flushing 0b6fd4461bcb296079228f893028756c 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-11-13T16:35:44,804 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241113c0d4c6e563174c06adaa5b077a142ffc_d8bcb5a49f7b9b9ccf99cda1f05a4ae8 is 69, key is 0a95d472110dd7eb5cba10eddb72216b6/cf:q/1731515744535/Put/seqid=0 2024-11-13T16:35:44,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742116_1292 (size=4964) 2024-11-13T16:35:44,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742116_1292 (size=4964) 2024-11-13T16:35:44,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742116_1292 (size=4964) 2024-11-13T16:35:44,816 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:35:44,818 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241113979e797ed24f4965bac2ba6b2dfcdc79_0b6fd4461bcb296079228f893028756c is 
71, key is 10e8c2d01e7310979f464046acd3bf6f/cf:q/1731515744538/Put/seqid=0 2024-11-13T16:35:44,828 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241113c0d4c6e563174c06adaa5b077a142ffc_d8bcb5a49f7b9b9ccf99cda1f05a4ae8 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241113c0d4c6e563174c06adaa5b077a142ffc_d8bcb5a49f7b9b9ccf99cda1f05a4ae8 2024-11-13T16:35:44,830 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/d8bcb5a49f7b9b9ccf99cda1f05a4ae8/.tmp/cf/13ff8e10a15d42c4b06a2fddebcf4a22, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=d8bcb5a49f7b9b9ccf99cda1f05a4ae8] 2024-11-13T16:35:44,831 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/d8bcb5a49f7b9b9ccf99cda1f05a4ae8/.tmp/cf/13ff8e10a15d42c4b06a2fddebcf4a22 is 224, key is 0a95d472110dd7eb5cba10eddb72216b6/cf:q/1731515744535/Put/seqid=0 2024-11-13T16:35:44,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742117_1293 (size=8311) 2024-11-13T16:35:44,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742117_1293 (size=8311) 2024-11-13T16:35:44,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742117_1293 (size=8311) 2024-11-13T16:35:44,845 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:35:44,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742118_1294 (size=5534) 2024-11-13T16:35:44,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742118_1294 (size=5534) 2024-11-13T16:35:44,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742118_1294 (size=5534) 2024-11-13T16:35:44,849 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=65, hasBloomFilter=true, into tmp file hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/d8bcb5a49f7b9b9ccf99cda1f05a4ae8/.tmp/cf/13ff8e10a15d42c4b06a2fddebcf4a22 
2024-11-13T16:35:44,852 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241113979e797ed24f4965bac2ba6b2dfcdc79_0b6fd4461bcb296079228f893028756c to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241113979e797ed24f4965bac2ba6b2dfcdc79_0b6fd4461bcb296079228f893028756c 2024-11-13T16:35:44,853 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b6fd4461bcb296079228f893028756c/.tmp/cf/097e4ad3d4bf4153b12637323f4c0170, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=0b6fd4461bcb296079228f893028756c] 2024-11-13T16:35:44,853 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b6fd4461bcb296079228f893028756c/.tmp/cf/097e4ad3d4bf4153b12637323f4c0170 is 224, key is 1852bbcb806e57e1055be14233c6d10d4/cf:q/1731515744538/Put/seqid=0 2024-11-13T16:35:44,854 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/d8bcb5a49f7b9b9ccf99cda1f05a4ae8/.tmp/cf/13ff8e10a15d42c4b06a2fddebcf4a22 as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/d8bcb5a49f7b9b9ccf99cda1f05a4ae8/cf/13ff8e10a15d42c4b06a2fddebcf4a22 2024-11-13T16:35:44,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742119_1295 (size=16157) 2024-11-13T16:35:44,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742119_1295 (size=16157) 2024-11-13T16:35:44,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742119_1295 (size=16157) 2024-11-13T16:35:44,859 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b6fd4461bcb296079228f893028756c/.tmp/cf/097e4ad3d4bf4153b12637323f4c0170 2024-11-13T16:35:44,860 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/d8bcb5a49f7b9b9ccf99cda1f05a4ae8/cf/13ff8e10a15d42c4b06a2fddebcf4a22, entries=1, sequenceid=6, filesize=5.4 K 2024-11-13T16:35:44,861 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(3140): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for d8bcb5a49f7b9b9ccf99cda1f05a4ae8 in 75ms, sequenceid=6, compaction requested=false 2024-11-13T16:35:44,861 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for d8bcb5a49f7b9b9ccf99cda1f05a4ae8: 2024-11-13T16:35:44,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-13T16:35:44,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T16:35:44,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:35:44,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/d8bcb5a49f7b9b9ccf99cda1f05a4ae8/cf/13ff8e10a15d42c4b06a2fddebcf4a22] hfiles 2024-11-13T16:35:44,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/d8bcb5a49f7b9b9ccf99cda1f05a4ae8/cf/13ff8e10a15d42c4b06a2fddebcf4a22 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T16:35:44,867 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b6fd4461bcb296079228f893028756c/.tmp/cf/097e4ad3d4bf4153b12637323f4c0170 as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b6fd4461bcb296079228f893028756c/cf/097e4ad3d4bf4153b12637323f4c0170 2024-11-13T16:35:44,873 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b6fd4461bcb296079228f893028756c/cf/097e4ad3d4bf4153b12637323f4c0170, entries=49, 
sequenceid=6, filesize=15.8 K 2024-11-13T16:35:44,874 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegion(3140): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for 0b6fd4461bcb296079228f893028756c in 88ms, sequenceid=6, compaction requested=false 2024-11-13T16:35:44,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegion(2603): Flush status journal for 0b6fd4461bcb296079228f893028756c: 2024-11-13T16:35:44,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-13T16:35:44,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T16:35:44,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:35:44,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b6fd4461bcb296079228f893028756c/cf/097e4ad3d4bf4153b12637323f4c0170] hfiles 2024-11-13T16:35:44,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b6fd4461bcb296079228f893028756c/cf/097e4ad3d4bf4153b12637323f4c0170 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T16:35:44,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742120_1296 (size=125) 2024-11-13T16:35:44,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742120_1296 (size=125) 2024-11-13T16:35:44,878 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8. 
2024-11-13T16:35:44,878 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-13T16:35:44,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742120_1296 (size=125) 2024-11-13T16:35:44,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-11-13T16:35:44,878 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region d8bcb5a49f7b9b9ccf99cda1f05a4ae8 2024-11-13T16:35:44,878 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d8bcb5a49f7b9b9ccf99cda1f05a4ae8 2024-11-13T16:35:44,884 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=102, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d8bcb5a49f7b9b9ccf99cda1f05a4ae8 in 249 msec 2024-11-13T16:35:44,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742121_1297 (size=125) 2024-11-13T16:35:44,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742121_1297 (size=125) 2024-11-13T16:35:44,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742121_1297 (size=125) 2024-11-13T16:35:44,889 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c. 
2024-11-13T16:35:44,889 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=104 2024-11-13T16:35:44,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=104 2024-11-13T16:35:44,890 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 0b6fd4461bcb296079228f893028756c 2024-11-13T16:35:44,890 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=104, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0b6fd4461bcb296079228f893028756c 2024-11-13T16:35:44,894 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=104, resume processing ppid=102 2024-11-13T16:35:44,894 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T16:35:44,894 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, ppid=102, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0b6fd4461bcb296079228f893028756c in 258 msec 2024-11-13T16:35:44,894 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T16:35:44,895 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-13T16:35:44,896 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T16:35:44,896 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:35:44,897 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241113979e797ed24f4965bac2ba6b2dfcdc79_0b6fd4461bcb296079228f893028756c, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241113c0d4c6e563174c06adaa5b077a142ffc_d8bcb5a49f7b9b9ccf99cda1f05a4ae8] hfiles 2024-11-13T16:35:44,897 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241113979e797ed24f4965bac2ba6b2dfcdc79_0b6fd4461bcb296079228f893028756c 2024-11-13T16:35:44,897 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241113c0d4c6e563174c06adaa5b077a142ffc_d8bcb5a49f7b9b9ccf99cda1f05a4ae8 2024-11-13T16:35:44,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742122_1298 (size=309) 2024-11-13T16:35:44,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742122_1298 (size=309) 2024-11-13T16:35:44,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742122_1298 (size=309) 2024-11-13T16:35:44,914 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T16:35:44,914 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T16:35:44,915 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T16:35:44,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-13T16:35:44,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43257 is added to blk_1073742123_1299 (size=1023) 2024-11-13T16:35:44,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742123_1299 (size=1023) 2024-11-13T16:35:44,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742123_1299 (size=1023) 2024-11-13T16:35:44,928 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T16:35:44,934 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T16:35:44,934 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T16:35:44,936 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T16:35:44,936 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 102 2024-11-13T16:35:44,937 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 334 msec 2024-11-13T16:35:45,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-13T16:35:45,228 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-13T16:35:45,254 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T16:35:45,256 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T16:35:45,256 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T16:35:45,257 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection 
from 172.17.0.3:45258, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T16:35:45,258 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38486, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T16:35:45,259 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45704, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T16:35:45,259 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38437 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-13T16:35:45,260 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39619 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-13T16:35:45,260 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38523 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-13T16:35:45,264 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T16:35:45,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=105, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:35:45,269 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T16:35:45,269 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:35:45,270 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 105 2024-11-13T16:35:45,270 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T16:35:45,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-13T16:35:45,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742124_1300 (size=399) 2024-11-13T16:35:45,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742124_1300 (size=399) 2024-11-13T16:35:45,291 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742124_1300 (size=399) 2024-11-13T16:35:45,291 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 38ce8102dfca11130c2bb14697ca56d3, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1731515745264.38ce8102dfca11130c2bb14697ca56d3.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:35:45,291 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8ba00fb83d1a1bdde651c03df5ab8404, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745264.8ba00fb83d1a1bdde651c03df5ab8404.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:35:45,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742125_1301 (size=85) 2024-11-13T16:35:45,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742125_1301 (size=85) 2024-11-13T16:35:45,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742125_1301 (size=85) 2024-11-13T16:35:45,310 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745264.8ba00fb83d1a1bdde651c03df5ab8404.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:35:45,311 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing 8ba00fb83d1a1bdde651c03df5ab8404, disabling compactions & flushes 2024-11-13T16:35:45,311 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745264.8ba00fb83d1a1bdde651c03df5ab8404. 
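[Note] The create-table request above ('testtb-testExportFileSystemStateWithMergeRegion-1' with a single 'cf' family and regions split at '2') corresponds, roughly, to the client-side sketch below against the public Admin API; the table name, family name, VERSIONS => '1' and the split key come from the log, the rest is assumed for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)   // VERSIONS => '1' in the descriptor above
                  .build());
          // A single split key ('2') yields the two regions initialized above.
          byte[][] splitKeys = { Bytes.toBytes("2") };
          admin.createTable(table.build(), splitKeys);
        }
      }
    }

Passing one split key produces exactly the two regions seen in the RegionOpenAndInit entries: one with STARTKEY '' and ENDKEY '2', and one with STARTKEY '2' and an open ENDKEY.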
2024-11-13T16:35:45,311 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745264.8ba00fb83d1a1bdde651c03df5ab8404. 2024-11-13T16:35:45,311 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745264.8ba00fb83d1a1bdde651c03df5ab8404. after waiting 0 ms 2024-11-13T16:35:45,311 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745264.8ba00fb83d1a1bdde651c03df5ab8404. 2024-11-13T16:35:45,311 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745264.8ba00fb83d1a1bdde651c03df5ab8404. 2024-11-13T16:35:45,311 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8ba00fb83d1a1bdde651c03df5ab8404: Waiting for close lock at 1731515745310Disabling compacts and flushes for region at 1731515745310Disabling writes for close at 1731515745311 (+1 ms)Writing region close event to WAL at 1731515745311Closed at 1731515745311 2024-11-13T16:35:45,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742126_1302 (size=85) 2024-11-13T16:35:45,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742126_1302 (size=85) 2024-11-13T16:35:45,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742126_1302 (size=85) 2024-11-13T16:35:45,318 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1731515745264.38ce8102dfca11130c2bb14697ca56d3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:35:45,318 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing 38ce8102dfca11130c2bb14697ca56d3, disabling compactions & flushes 2024-11-13T16:35:45,318 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1731515745264.38ce8102dfca11130c2bb14697ca56d3. 2024-11-13T16:35:45,318 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1731515745264.38ce8102dfca11130c2bb14697ca56d3. 2024-11-13T16:35:45,318 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1731515745264.38ce8102dfca11130c2bb14697ca56d3. 
after waiting 0 ms 2024-11-13T16:35:45,318 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1731515745264.38ce8102dfca11130c2bb14697ca56d3. 2024-11-13T16:35:45,319 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1731515745264.38ce8102dfca11130c2bb14697ca56d3. 2024-11-13T16:35:45,319 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for 38ce8102dfca11130c2bb14697ca56d3: Waiting for close lock at 1731515745318Disabling compacts and flushes for region at 1731515745318Disabling writes for close at 1731515745318Writing region close event to WAL at 1731515745318Closed at 1731515745319 (+1 ms) 2024-11-13T16:35:45,331 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T16:35:45,331 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745264.8ba00fb83d1a1bdde651c03df5ab8404.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1731515745331"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515745331"}]},"ts":"1731515745331"} 2024-11-13T16:35:45,331 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1731515745264.38ce8102dfca11130c2bb14697ca56d3.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1731515745331"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515745331"}]},"ts":"1731515745331"} 2024-11-13T16:35:45,334 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-11-13T16:35:45,335 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T16:35:45,336 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515745335"}]},"ts":"1731515745335"} 2024-11-13T16:35:45,337 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-11-13T16:35:45,338 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {f8f85b0bb1d8=0} racks are {/default-rack=0} 2024-11-13T16:35:45,339 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T16:35:45,339 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T16:35:45,339 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T16:35:45,339 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T16:35:45,339 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T16:35:45,339 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T16:35:45,339 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T16:35:45,339 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T16:35:45,339 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T16:35:45,339 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T16:35:45,340 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8ba00fb83d1a1bdde651c03df5ab8404, ASSIGN}, {pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=38ce8102dfca11130c2bb14697ca56d3, ASSIGN}] 2024-11-13T16:35:45,341 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=38ce8102dfca11130c2bb14697ca56d3, ASSIGN 2024-11-13T16:35:45,341 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8ba00fb83d1a1bdde651c03df5ab8404, ASSIGN 2024-11-13T16:35:45,343 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=38ce8102dfca11130c2bb14697ca56d3, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,39619,1731515529105; forceNewPlan=false, 
retain=false 2024-11-13T16:35:45,343 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8ba00fb83d1a1bdde651c03df5ab8404, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,38437,1731515528789; forceNewPlan=false, retain=false 2024-11-13T16:35:45,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-13T16:35:45,494 INFO [f8f85b0bb1d8:45809 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-13T16:35:45,494 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=106 updating hbase:meta row=8ba00fb83d1a1bdde651c03df5ab8404, regionState=OPENING, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:35:45,494 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=107 updating hbase:meta row=38ce8102dfca11130c2bb14697ca56d3, regionState=OPENING, regionLocation=f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:35:45,497 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8ba00fb83d1a1bdde651c03df5ab8404, ASSIGN because future has completed 2024-11-13T16:35:45,498 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=106, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8ba00fb83d1a1bdde651c03df5ab8404, server=f8f85b0bb1d8,38437,1731515528789}] 2024-11-13T16:35:45,499 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=38ce8102dfca11130c2bb14697ca56d3, ASSIGN because future has completed 2024-11-13T16:35:45,500 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=107, state=RUNNABLE, hasLock=false; OpenRegionProcedure 38ce8102dfca11130c2bb14697ca56d3, server=f8f85b0bb1d8,39619,1731515529105}] 2024-11-13T16:35:45,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-13T16:35:45,658 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745264.8ba00fb83d1a1bdde651c03df5ab8404. 2024-11-13T16:35:45,658 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1731515745264.38ce8102dfca11130c2bb14697ca56d3. 
2024-11-13T16:35:45,658 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7752): Opening region: {ENCODED => 8ba00fb83d1a1bdde651c03df5ab8404, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745264.8ba00fb83d1a1bdde651c03df5ab8404.', STARTKEY => '', ENDKEY => '2'} 2024-11-13T16:35:45,658 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7752): Opening region: {ENCODED => 38ce8102dfca11130c2bb14697ca56d3, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1731515745264.38ce8102dfca11130c2bb14697ca56d3.', STARTKEY => '2', ENDKEY => ''} 2024-11-13T16:35:45,659 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1731515745264.38ce8102dfca11130c2bb14697ca56d3. service=AccessControlService 2024-11-13T16:35:45,659 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T16:35:45,659 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 38ce8102dfca11130c2bb14697ca56d3 2024-11-13T16:35:45,659 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1731515745264.38ce8102dfca11130c2bb14697ca56d3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:35:45,659 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7794): checking encryption for 38ce8102dfca11130c2bb14697ca56d3 2024-11-13T16:35:45,659 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7797): checking classloading for 38ce8102dfca11130c2bb14697ca56d3 2024-11-13T16:35:45,660 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745264.8ba00fb83d1a1bdde651c03df5ab8404. service=AccessControlService 2024-11-13T16:35:45,660 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-13T16:35:45,660 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 8ba00fb83d1a1bdde651c03df5ab8404 2024-11-13T16:35:45,661 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745264.8ba00fb83d1a1bdde651c03df5ab8404.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:35:45,661 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7794): checking encryption for 8ba00fb83d1a1bdde651c03df5ab8404 2024-11-13T16:35:45,661 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7797): checking classloading for 8ba00fb83d1a1bdde651c03df5ab8404 2024-11-13T16:35:45,663 INFO [StoreOpener-38ce8102dfca11130c2bb14697ca56d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 38ce8102dfca11130c2bb14697ca56d3 2024-11-13T16:35:45,666 INFO [StoreOpener-38ce8102dfca11130c2bb14697ca56d3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 38ce8102dfca11130c2bb14697ca56d3 columnFamilyName cf 2024-11-13T16:35:45,666 DEBUG [StoreOpener-38ce8102dfca11130c2bb14697ca56d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:35:45,667 INFO [StoreOpener-38ce8102dfca11130c2bb14697ca56d3-1 {}] regionserver.HStore(327): Store=38ce8102dfca11130c2bb14697ca56d3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:35:45,667 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1038): replaying wal for 38ce8102dfca11130c2bb14697ca56d3 2024-11-13T16:35:45,670 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/38ce8102dfca11130c2bb14697ca56d3 2024-11-13T16:35:45,671 INFO [StoreOpener-8ba00fb83d1a1bdde651c03df5ab8404-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8ba00fb83d1a1bdde651c03df5ab8404 2024-11-13T16:35:45,671 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/38ce8102dfca11130c2bb14697ca56d3 2024-11-13T16:35:45,671 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1048): stopping wal replay for 38ce8102dfca11130c2bb14697ca56d3 2024-11-13T16:35:45,672 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1060): Cleaning up temporary data for 38ce8102dfca11130c2bb14697ca56d3 2024-11-13T16:35:45,672 INFO [StoreOpener-8ba00fb83d1a1bdde651c03df5ab8404-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8ba00fb83d1a1bdde651c03df5ab8404 columnFamilyName cf 2024-11-13T16:35:45,672 DEBUG [StoreOpener-8ba00fb83d1a1bdde651c03df5ab8404-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:35:45,673 INFO [StoreOpener-8ba00fb83d1a1bdde651c03df5ab8404-1 {}] regionserver.HStore(327): Store=8ba00fb83d1a1bdde651c03df5ab8404/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:35:45,673 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1038): replaying wal for 8ba00fb83d1a1bdde651c03df5ab8404 2024-11-13T16:35:45,674 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1093): writing seq id for 38ce8102dfca11130c2bb14697ca56d3 2024-11-13T16:35:45,674 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ba00fb83d1a1bdde651c03df5ab8404 2024-11-13T16:35:45,674 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ba00fb83d1a1bdde651c03df5ab8404 2024-11-13T16:35:45,675 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1048): stopping wal replay for 8ba00fb83d1a1bdde651c03df5ab8404 
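The CompactionConfiguration entries above echo the effective store compaction settings for family 'cf' (minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2, off-peak ratio 5.0). As rough orientation only, and assuming the standard key names rather than anything this test actually sets, those values usually come from configuration keys like the following:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        // Assumed mapping of the logged values to the usual configuration keys.
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
        System.out.println("compaction ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", 0f));
      }
    }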
2024-11-13T16:35:45,675 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1060): Cleaning up temporary data for 8ba00fb83d1a1bdde651c03df5ab8404 2024-11-13T16:35:45,676 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/38ce8102dfca11130c2bb14697ca56d3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:35:45,676 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1093): writing seq id for 8ba00fb83d1a1bdde651c03df5ab8404 2024-11-13T16:35:45,676 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1114): Opened 38ce8102dfca11130c2bb14697ca56d3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65501131, jitterRate=-0.023957088589668274}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:35:45,676 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 38ce8102dfca11130c2bb14697ca56d3 2024-11-13T16:35:45,677 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1006): Region open journal for 38ce8102dfca11130c2bb14697ca56d3: Running coprocessor pre-open hook at 1731515745659Writing region info on filesystem at 1731515745659Initializing all the Stores at 1731515745662 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515745662Cleaning up temporary data from old regions at 1731515745672 (+10 ms)Running coprocessor post-open hooks at 1731515745676 (+4 ms)Region opened successfully at 1731515745677 (+1 ms) 2024-11-13T16:35:45,680 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ba00fb83d1a1bdde651c03df5ab8404/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:35:45,681 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1114): Opened 8ba00fb83d1a1bdde651c03df5ab8404; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67060291, jitterRate=-7.237941026687622E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:35:45,681 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8ba00fb83d1a1bdde651c03df5ab8404 2024-11-13T16:35:45,681 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1006): Region open journal for 
8ba00fb83d1a1bdde651c03df5ab8404: Running coprocessor pre-open hook at 1731515745661Writing region info on filesystem at 1731515745661Initializing all the Stores at 1731515745662 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515745662Cleaning up temporary data from old regions at 1731515745675 (+13 ms)Running coprocessor post-open hooks at 1731515745681 (+6 ms)Region opened successfully at 1731515745681 2024-11-13T16:35:45,683 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1731515745264.38ce8102dfca11130c2bb14697ca56d3., pid=109, masterSystemTime=1731515745655 2024-11-13T16:35:45,687 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745264.8ba00fb83d1a1bdde651c03df5ab8404., pid=108, masterSystemTime=1731515745655 2024-11-13T16:35:45,693 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1731515745264.38ce8102dfca11130c2bb14697ca56d3. 2024-11-13T16:35:45,693 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1731515745264.38ce8102dfca11130c2bb14697ca56d3. 2024-11-13T16:35:45,694 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=107 updating hbase:meta row=38ce8102dfca11130c2bb14697ca56d3, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:35:45,694 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745264.8ba00fb83d1a1bdde651c03df5ab8404. 2024-11-13T16:35:45,694 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745264.8ba00fb83d1a1bdde651c03df5ab8404. 
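At this point both regions report "Opened ... next sequenceid=2" together with their region open journals. A minimal sketch (connection setup assumed, not part of the test) of listing the table's regions from a client, which is how the two encoded names and key ranges above would appear from the outside:

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ListRegionsSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
          List<RegionInfo> regions = admin.getRegions(tn);
          for (RegionInfo ri : regions) {
            // Prints the encoded names seen in the log (e.g. 8ba00fb8... and 38ce8102...).
            System.out.println(ri.getEncodedName()
                + " start=" + Bytes.toStringBinary(ri.getStartKey())
                + " end=" + Bytes.toStringBinary(ri.getEndKey()));
          }
        }
      }
    }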
2024-11-13T16:35:45,694 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=106 updating hbase:meta row=8ba00fb83d1a1bdde651c03df5ab8404, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:35:45,696 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=107, state=RUNNABLE, hasLock=false; OpenRegionProcedure 38ce8102dfca11130c2bb14697ca56d3, server=f8f85b0bb1d8,39619,1731515529105 because future has completed 2024-11-13T16:35:45,697 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=108, ppid=106, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8ba00fb83d1a1bdde651c03df5ab8404, server=f8f85b0bb1d8,38437,1731515528789 because future has completed 2024-11-13T16:35:45,704 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=109, resume processing ppid=107 2024-11-13T16:35:45,704 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=107, state=SUCCESS, hasLock=false; OpenRegionProcedure 38ce8102dfca11130c2bb14697ca56d3, server=f8f85b0bb1d8,39619,1731515529105 in 198 msec 2024-11-13T16:35:45,710 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=106 2024-11-13T16:35:45,710 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, ppid=105, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=38ce8102dfca11130c2bb14697ca56d3, ASSIGN in 364 msec 2024-11-13T16:35:45,710 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=106, state=SUCCESS, hasLock=false; OpenRegionProcedure 8ba00fb83d1a1bdde651c03df5ab8404, server=f8f85b0bb1d8,38437,1731515528789 in 203 msec 2024-11-13T16:35:45,712 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=106, resume processing ppid=105 2024-11-13T16:35:45,712 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=105, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8ba00fb83d1a1bdde651c03df5ab8404, ASSIGN in 370 msec 2024-11-13T16:35:45,713 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T16:35:45,713 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515745713"}]},"ts":"1731515745713"} 2024-11-13T16:35:45,717 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-11-13T16:35:45,719 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T16:35:45,719 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-11-13T16:35:45,723 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38523 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-13T16:35:45,736 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0005_000001 (auth:SIMPLE) from 127.0.0.1:60074 2024-11-13T16:35:45,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:35:45,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:35:45,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:35:45,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:35:45,748 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-13T16:35:45,748 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-13T16:35:45,748 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-13T16:35:45,749 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-13T16:35:45,749 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-13T16:35:45,749 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-13T16:35:45,749 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data 
PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-13T16:35:45,749 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-13T16:35:45,752 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_0/usercache/jenkins/appcache/application_1731515545832_0005/container_1731515545832_0005_01_000001/launch_container.sh] 2024-11-13T16:35:45,752 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_0/usercache/jenkins/appcache/application_1731515545832_0005/container_1731515545832_0005_01_000001/container_tokens] 2024-11-13T16:35:45,752 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_0/usercache/jenkins/appcache/application_1731515545832_0005/container_1731515545832_0005_01_000001/sysfs] 2024-11-13T16:35:45,755 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 484 msec 2024-11-13T16:35:45,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-13T16:35:45,899 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-13T16:35:45,904 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745264.8ba00fb83d1a1bdde651c03df5ab8404., hostname=f8f85b0bb1d8,38437,1731515528789, seqNum=2] 2024-11-13T16:35:45,909 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1731515745264.38ce8102dfca11130c2bb14697ca56d3., hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=2] 2024-11-13T16:35:45,910 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-11-13T16:35:45,929 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.3 merge regions [8ba00fb83d1a1bdde651c03df5ab8404, 
38ce8102dfca11130c2bb14697ca56d3] 2024-11-13T16:35:45,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[8ba00fb83d1a1bdde651c03df5ab8404, 38ce8102dfca11130c2bb14697ca56d3], force=true 2024-11-13T16:35:45,936 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[8ba00fb83d1a1bdde651c03df5ab8404, 38ce8102dfca11130c2bb14697ca56d3], force=true 2024-11-13T16:35:45,936 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[8ba00fb83d1a1bdde651c03df5ab8404, 38ce8102dfca11130c2bb14697ca56d3], force=true 2024-11-13T16:35:45,936 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[8ba00fb83d1a1bdde651c03df5ab8404, 38ce8102dfca11130c2bb14697ca56d3], force=true 2024-11-13T16:35:45,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-13T16:35:45,955 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8ba00fb83d1a1bdde651c03df5ab8404, UNASSIGN}, {pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=38ce8102dfca11130c2bb14697ca56d3, UNASSIGN}] 2024-11-13T16:35:45,956 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8ba00fb83d1a1bdde651c03df5ab8404, UNASSIGN 2024-11-13T16:35:45,957 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=38ce8102dfca11130c2bb14697ca56d3, UNASSIGN 2024-11-13T16:35:45,957 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=112 updating hbase:meta row=38ce8102dfca11130c2bb14697ca56d3, regionState=CLOSING, regionLocation=f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:35:45,957 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=111 updating hbase:meta row=8ba00fb83d1a1bdde651c03df5ab8404, regionState=CLOSING, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:35:45,959 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, 
region=8ba00fb83d1a1bdde651c03df5ab8404, UNASSIGN because future has completed 2024-11-13T16:35:45,959 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-13T16:35:45,959 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=113, ppid=111, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8ba00fb83d1a1bdde651c03df5ab8404, server=f8f85b0bb1d8,38437,1731515528789}] 2024-11-13T16:35:45,960 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=38ce8102dfca11130c2bb14697ca56d3, UNASSIGN because future has completed 2024-11-13T16:35:45,960 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-13T16:35:45,960 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=114, ppid=112, state=RUNNABLE, hasLock=false; CloseRegionProcedure 38ce8102dfca11130c2bb14697ca56d3, server=f8f85b0bb1d8,39619,1731515529105}] 2024-11-13T16:35:46,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-13T16:35:46,112 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(122): Close 8ba00fb83d1a1bdde651c03df5ab8404 2024-11-13T16:35:46,112 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-13T16:35:46,112 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1722): Closing 8ba00fb83d1a1bdde651c03df5ab8404, disabling compactions & flushes 2024-11-13T16:35:46,112 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745264.8ba00fb83d1a1bdde651c03df5ab8404. 2024-11-13T16:35:46,112 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745264.8ba00fb83d1a1bdde651c03df5ab8404. 2024-11-13T16:35:46,112 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745264.8ba00fb83d1a1bdde651c03df5ab8404. after waiting 0 ms 2024-11-13T16:35:46,112 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745264.8ba00fb83d1a1bdde651c03df5ab8404. 
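The MergeTableRegionsProcedure (pid=110, force=true) requested by the client above first unassigns both parent regions through child procedures (pids 111-114, UNASSIGN and CloseRegionProcedure) before it can create the merged region. A hedged sketch of the client call that triggers such a merge; the region lookup, ordering, and connection setup are assumptions, not the test's code:

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class MergeRegionsSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
          List<RegionInfo> regions = admin.getRegions(tn);
          // forcible=true also permits merging non-adjacent regions; here the two parents
          // are adjacent ('' -> '2' and '2' -> ''), so it simply mirrors the force=true
          // flag recorded by the procedure above.
          byte[][] toMerge = {
              regions.get(0).getEncodedNameAsBytes(),
              regions.get(1).getEncodedNameAsBytes()
          };
          admin.mergeRegionsAsync(toMerge, true).get();
        }
      }
    }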
2024-11-13T16:35:46,112 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(2902): Flushing 8ba00fb83d1a1bdde651c03df5ab8404 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-13T16:35:46,113 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(122): Close 38ce8102dfca11130c2bb14697ca56d3 2024-11-13T16:35:46,113 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-13T16:35:46,113 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1722): Closing 38ce8102dfca11130c2bb14697ca56d3, disabling compactions & flushes 2024-11-13T16:35:46,113 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1731515745264.38ce8102dfca11130c2bb14697ca56d3. 2024-11-13T16:35:46,113 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1731515745264.38ce8102dfca11130c2bb14697ca56d3. 2024-11-13T16:35:46,113 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1731515745264.38ce8102dfca11130c2bb14697ca56d3. after waiting 0 ms 2024-11-13T16:35:46,113 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1731515745264.38ce8102dfca11130c2bb14697ca56d3. 
2024-11-13T16:35:46,113 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(2902): Flushing 38ce8102dfca11130c2bb14697ca56d3 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-13T16:35:46,133 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/38ce8102dfca11130c2bb14697ca56d3/.tmp/cf/2a8ebc47b62d4630a25e16b0afc477d6 is 28, key is 2/cf:/1731515745910/Put/seqid=0 2024-11-13T16:35:46,133 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ba00fb83d1a1bdde651c03df5ab8404/.tmp/cf/de2929cf776f4703ae5834e45251817d is 28, key is 1/cf:/1731515745905/Put/seqid=0 2024-11-13T16:35:46,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742128_1304 (size=4945) 2024-11-13T16:35:46,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742127_1303 (size=4945) 2024-11-13T16:35:46,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742127_1303 (size=4945) 2024-11-13T16:35:46,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742128_1304 (size=4945) 2024-11-13T16:35:46,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742127_1303 (size=4945) 2024-11-13T16:35:46,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742128_1304 (size=4945) 2024-11-13T16:35:46,142 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ba00fb83d1a1bdde651c03df5ab8404/.tmp/cf/de2929cf776f4703ae5834e45251817d 2024-11-13T16:35:46,143 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/38ce8102dfca11130c2bb14697ca56d3/.tmp/cf/2a8ebc47b62d4630a25e16b0afc477d6 2024-11-13T16:35:46,149 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ba00fb83d1a1bdde651c03df5ab8404/.tmp/cf/de2929cf776f4703ae5834e45251817d as 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ba00fb83d1a1bdde651c03df5ab8404/cf/de2929cf776f4703ae5834e45251817d 2024-11-13T16:35:46,151 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/38ce8102dfca11130c2bb14697ca56d3/.tmp/cf/2a8ebc47b62d4630a25e16b0afc477d6 as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/38ce8102dfca11130c2bb14697ca56d3/cf/2a8ebc47b62d4630a25e16b0afc477d6 2024-11-13T16:35:46,157 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/38ce8102dfca11130c2bb14697ca56d3/cf/2a8ebc47b62d4630a25e16b0afc477d6, entries=1, sequenceid=5, filesize=4.8 K 2024-11-13T16:35:46,158 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 38ce8102dfca11130c2bb14697ca56d3 in 45ms, sequenceid=5, compaction requested=false 2024-11-13T16:35:46,158 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ba00fb83d1a1bdde651c03df5ab8404/cf/de2929cf776f4703ae5834e45251817d, entries=1, sequenceid=5, filesize=4.8 K 2024-11-13T16:35:46,158 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-11-13T16:35:46,163 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 8ba00fb83d1a1bdde651c03df5ab8404 in 51ms, sequenceid=5, compaction requested=false 2024-11-13T16:35:46,169 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/38ce8102dfca11130c2bb14697ca56d3/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-13T16:35:46,169 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:35:46,170 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1731515745264.38ce8102dfca11130c2bb14697ca56d3. 
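The flush entries above show that each parent region held a single ~24 B cell, keyed '1' and '2' respectively in family 'cf' with an empty qualifier, written out to a ~4.8 K HFile at sequenceid=5 as part of the close. A minimal sketch of how such rows could be written from a client; the value contents are an assumption, since only the row keys and family appear in the log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteSampleRowsSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(
                 TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"))) {
          // Row '1' lands in the '' -> '2' region and row '2' in the '2' -> '' region,
          // matching the 'key is 1/cf:' and 'key is 2/cf:' cells flushed above.
          table.put(new Put(Bytes.toBytes("1"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes(""), Bytes.toBytes("v")));
          table.put(new Put(Bytes.toBytes("2"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes(""), Bytes.toBytes("v")));
        }
      }
    }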
2024-11-13T16:35:46,170 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ba00fb83d1a1bdde651c03df5ab8404/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-13T16:35:46,170 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1676): Region close journal for 38ce8102dfca11130c2bb14697ca56d3: Waiting for close lock at 1731515746113Running coprocessor pre-close hooks at 1731515746113Disabling compacts and flushes for region at 1731515746113Disabling writes for close at 1731515746113Obtaining lock to block concurrent updates at 1731515746113Preparing flush snapshotting stores in 38ce8102dfca11130c2bb14697ca56d3 at 1731515746113Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1731515745264.38ce8102dfca11130c2bb14697ca56d3., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1731515746114 (+1 ms)Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1731515745264.38ce8102dfca11130c2bb14697ca56d3. at 1731515746114Flushing 38ce8102dfca11130c2bb14697ca56d3/cf: creating writer at 1731515746114Flushing 38ce8102dfca11130c2bb14697ca56d3/cf: appending metadata at 1731515746132 (+18 ms)Flushing 38ce8102dfca11130c2bb14697ca56d3/cf: closing flushed file at 1731515746132Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@76dda9c8: reopening flushed file at 1731515746150 (+18 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 38ce8102dfca11130c2bb14697ca56d3 in 45ms, sequenceid=5, compaction requested=false at 1731515746158 (+8 ms)Writing region close event to WAL at 1731515746163 (+5 ms)Running coprocessor post-close hooks at 1731515746169 (+6 ms)Closed at 1731515746169 2024-11-13T16:35:46,170 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:35:46,170 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745264.8ba00fb83d1a1bdde651c03df5ab8404. 2024-11-13T16:35:46,170 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1676): Region close journal for 8ba00fb83d1a1bdde651c03df5ab8404: Waiting for close lock at 1731515746112Running coprocessor pre-close hooks at 1731515746112Disabling compacts and flushes for region at 1731515746112Disabling writes for close at 1731515746112Obtaining lock to block concurrent updates at 1731515746112Preparing flush snapshotting stores in 8ba00fb83d1a1bdde651c03df5ab8404 at 1731515746112Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745264.8ba00fb83d1a1bdde651c03df5ab8404., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1731515746113 (+1 ms)Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745264.8ba00fb83d1a1bdde651c03df5ab8404. 
at 1731515746113Flushing 8ba00fb83d1a1bdde651c03df5ab8404/cf: creating writer at 1731515746114 (+1 ms)Flushing 8ba00fb83d1a1bdde651c03df5ab8404/cf: appending metadata at 1731515746132 (+18 ms)Flushing 8ba00fb83d1a1bdde651c03df5ab8404/cf: closing flushed file at 1731515746132Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a9cb040: reopening flushed file at 1731515746148 (+16 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 8ba00fb83d1a1bdde651c03df5ab8404 in 51ms, sequenceid=5, compaction requested=false at 1731515746163 (+15 ms)Writing region close event to WAL at 1731515746165 (+2 ms)Running coprocessor post-close hooks at 1731515746170 (+5 ms)Closed at 1731515746170 2024-11-13T16:35:46,172 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(157): Closed 38ce8102dfca11130c2bb14697ca56d3 2024-11-13T16:35:46,176 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=112 updating hbase:meta row=38ce8102dfca11130c2bb14697ca56d3, regionState=CLOSED 2024-11-13T16:35:46,176 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(157): Closed 8ba00fb83d1a1bdde651c03df5ab8404 2024-11-13T16:35:46,177 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=111 updating hbase:meta row=8ba00fb83d1a1bdde651c03df5ab8404, regionState=CLOSED 2024-11-13T16:35:46,179 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=114, ppid=112, state=RUNNABLE, hasLock=false; CloseRegionProcedure 38ce8102dfca11130c2bb14697ca56d3, server=f8f85b0bb1d8,39619,1731515529105 because future has completed 2024-11-13T16:35:46,180 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=113, ppid=111, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8ba00fb83d1a1bdde651c03df5ab8404, server=f8f85b0bb1d8,38437,1731515528789 because future has completed 2024-11-13T16:35:46,183 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=114, resume processing ppid=112 2024-11-13T16:35:46,184 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, ppid=112, state=SUCCESS, hasLock=false; CloseRegionProcedure 38ce8102dfca11130c2bb14697ca56d3, server=f8f85b0bb1d8,39619,1731515529105 in 220 msec 2024-11-13T16:35:46,185 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=110, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=38ce8102dfca11130c2bb14697ca56d3, UNASSIGN in 229 msec 2024-11-13T16:35:46,185 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=113, resume processing ppid=111 2024-11-13T16:35:46,185 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, ppid=111, state=SUCCESS, hasLock=false; CloseRegionProcedure 8ba00fb83d1a1bdde651c03df5ab8404, server=f8f85b0bb1d8,38437,1731515528789 in 222 msec 2024-11-13T16:35:46,186 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=111, resume processing ppid=110 2024-11-13T16:35:46,186 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=110, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8ba00fb83d1a1bdde651c03df5ab8404, 
UNASSIGN in 230 msec 2024-11-13T16:35:46,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742129_1305 (size=84) 2024-11-13T16:35:46,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742129_1305 (size=84) 2024-11-13T16:35:46,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742129_1305 (size=84) 2024-11-13T16:35:46,207 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:35:46,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742130_1306 (size=20) 2024-11-13T16:35:46,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742130_1306 (size=20) 2024-11-13T16:35:46,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742130_1306 (size=20) 2024-11-13T16:35:46,237 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:35:46,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742131_1307 (size=21) 2024-11-13T16:35:46,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742131_1307 (size=21) 2024-11-13T16:35:46,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742131_1307 (size=21) 2024-11-13T16:35:46,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-13T16:35:46,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742132_1308 (size=84) 2024-11-13T16:35:46,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742132_1308 (size=84) 2024-11-13T16:35:46,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742132_1308 (size=84) 2024-11-13T16:35:46,260 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:35:46,274 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5b7d129670ad8b90d0358e1bad77535d/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-11-13T16:35:46,277 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745264.8ba00fb83d1a1bdde651c03df5ab8404.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 
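The RegionStateStore entries above and below delete the two parent rows from hbase:meta and put a new row for the merged region 5b7d129670ad8b90d0358e1bad77535d, carrying merge0000/merge0001 qualifiers that point back at the parents. A hedged sketch of scanning hbase:meta from a client to look at those rows; the start row and scan limit are assumptions chosen for illustration:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScanMetaSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table meta = conn.getTable(TableName.META_TABLE_NAME);
             ResultScanner scanner = meta.getScanner(new Scan()
                 .withStartRow(Bytes.toBytes("testtb-testExportFileSystemStateWithMergeRegion-1,"))
                 .setLimit(5)
                 .addFamily(Bytes.toBytes("info")))) {
          for (Result r : scanner) {
            // After the merge, only the 5b7d1296... row should remain for this table.
            System.out.println(Bytes.toStringBinary(r.getRow()));
          }
        }
      }
    }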
2024-11-13T16:35:46,277 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1731515745264.38ce8102dfca11130c2bb14697ca56d3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-13T16:35:46,277 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745265.5b7d129670ad8b90d0358e1bad77535d.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-13T16:35:46,302 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5b7d129670ad8b90d0358e1bad77535d, ASSIGN}] 2024-11-13T16:35:46,303 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5b7d129670ad8b90d0358e1bad77535d, ASSIGN 2024-11-13T16:35:46,304 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5b7d129670ad8b90d0358e1bad77535d, ASSIGN; state=MERGED, location=f8f85b0bb1d8,38437,1731515528789; forceNewPlan=false, retain=false 2024-11-13T16:35:46,456 INFO [f8f85b0bb1d8:45809 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-13T16:35:46,456 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=5b7d129670ad8b90d0358e1bad77535d, regionState=OPENING, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:35:46,464 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5b7d129670ad8b90d0358e1bad77535d, ASSIGN because future has completed 2024-11-13T16:35:46,465 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5b7d129670ad8b90d0358e1bad77535d, server=f8f85b0bb1d8,38437,1731515528789}] 2024-11-13T16:35:46,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-13T16:35:46,625 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745265.5b7d129670ad8b90d0358e1bad77535d. 
2024-11-13T16:35:46,625 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(7752): Opening region: {ENCODED => 5b7d129670ad8b90d0358e1bad77535d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745265.5b7d129670ad8b90d0358e1bad77535d.', STARTKEY => '', ENDKEY => ''} 2024-11-13T16:35:46,626 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745265.5b7d129670ad8b90d0358e1bad77535d. service=AccessControlService 2024-11-13T16:35:46,626 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T16:35:46,626 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 5b7d129670ad8b90d0358e1bad77535d 2024-11-13T16:35:46,626 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745265.5b7d129670ad8b90d0358e1bad77535d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:35:46,626 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(7794): checking encryption for 5b7d129670ad8b90d0358e1bad77535d 2024-11-13T16:35:46,626 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(7797): checking classloading for 5b7d129670ad8b90d0358e1bad77535d 2024-11-13T16:35:46,628 INFO [StoreOpener-5b7d129670ad8b90d0358e1bad77535d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5b7d129670ad8b90d0358e1bad77535d 2024-11-13T16:35:46,629 INFO [StoreOpener-5b7d129670ad8b90d0358e1bad77535d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5b7d129670ad8b90d0358e1bad77535d columnFamilyName cf 2024-11-13T16:35:46,629 DEBUG [StoreOpener-5b7d129670ad8b90d0358e1bad77535d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:35:46,650 DEBUG [StoreOpener-5b7d129670ad8b90d0358e1bad77535d-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5b7d129670ad8b90d0358e1bad77535d/cf/2a8ebc47b62d4630a25e16b0afc477d6.38ce8102dfca11130c2bb14697ca56d3->hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/38ce8102dfca11130c2bb14697ca56d3/cf/2a8ebc47b62d4630a25e16b0afc477d6-top 2024-11-13T16:35:46,656 DEBUG [StoreOpener-5b7d129670ad8b90d0358e1bad77535d-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5b7d129670ad8b90d0358e1bad77535d/cf/de2929cf776f4703ae5834e45251817d.8ba00fb83d1a1bdde651c03df5ab8404->hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ba00fb83d1a1bdde651c03df5ab8404/cf/de2929cf776f4703ae5834e45251817d-top 2024-11-13T16:35:46,656 INFO [StoreOpener-5b7d129670ad8b90d0358e1bad77535d-1 {}] regionserver.HStore(327): Store=5b7d129670ad8b90d0358e1bad77535d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:35:46,656 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1038): replaying wal for 5b7d129670ad8b90d0358e1bad77535d 2024-11-13T16:35:46,657 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5b7d129670ad8b90d0358e1bad77535d 2024-11-13T16:35:46,659 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5b7d129670ad8b90d0358e1bad77535d 2024-11-13T16:35:46,659 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1048): stopping wal replay for 5b7d129670ad8b90d0358e1bad77535d 2024-11-13T16:35:46,659 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1060): Cleaning up temporary data for 5b7d129670ad8b90d0358e1bad77535d 2024-11-13T16:35:46,661 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1093): writing seq id for 5b7d129670ad8b90d0358e1bad77535d 2024-11-13T16:35:46,662 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1114): Opened 5b7d129670ad8b90d0358e1bad77535d; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63404617, jitterRate=-0.0551975816488266}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:35:46,662 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5b7d129670ad8b90d0358e1bad77535d 2024-11-13T16:35:46,662 DEBUG 
[RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1006): Region open journal for 5b7d129670ad8b90d0358e1bad77535d: Running coprocessor pre-open hook at 1731515746626Writing region info on filesystem at 1731515746626Initializing all the Stores at 1731515746627 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515746627Cleaning up temporary data from old regions at 1731515746659 (+32 ms)Running coprocessor post-open hooks at 1731515746662 (+3 ms)Region opened successfully at 1731515746662 2024-11-13T16:35:46,663 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745265.5b7d129670ad8b90d0358e1bad77535d., pid=116, masterSystemTime=1731515746621 2024-11-13T16:35:46,664 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745265.5b7d129670ad8b90d0358e1bad77535d.,because compaction is disabled. 2024-11-13T16:35:46,666 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745265.5b7d129670ad8b90d0358e1bad77535d. 2024-11-13T16:35:46,666 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745265.5b7d129670ad8b90d0358e1bad77535d. 
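The StoreEngine entries above show the merged region opening against two reference files ("...-top") that point back into the parents' store files, and the follow-up compaction request is ignored because compaction is disabled in this test. When compaction is enabled, a compaction rewrites those references into regular store files owned by the merged region; a hedged sketch of requesting a major compaction from the client side, shown only to illustrate that path:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MajorCompactSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asynchronous request; the region server schedules the compaction, which
          // rewrites the '-top' reference files into ordinary store files.
          admin.majorCompact(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"));
        }
      }
    }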
2024-11-13T16:35:46,666 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=5b7d129670ad8b90d0358e1bad77535d, regionState=OPEN, openSeqNum=9, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:35:46,670 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5b7d129670ad8b90d0358e1bad77535d, server=f8f85b0bb1d8,38437,1731515528789 because future has completed 2024-11-13T16:35:46,674 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=116, resume processing ppid=115 2024-11-13T16:35:46,674 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure 5b7d129670ad8b90d0358e1bad77535d, server=f8f85b0bb1d8,38437,1731515528789 in 206 msec 2024-11-13T16:35:46,683 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=115, resume processing ppid=110 2024-11-13T16:35:46,683 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=110, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5b7d129670ad8b90d0358e1bad77535d, ASSIGN in 372 msec 2024-11-13T16:35:46,686 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[8ba00fb83d1a1bdde651c03df5ab8404, 38ce8102dfca11130c2bb14697ca56d3], force=true in 752 msec 2024-11-13T16:35:47,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-13T16:35:47,079 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-13T16:35:47,079 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-13T16:35:47,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731515747079 (current time:1731515747079). 
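With the MERGE_REGIONS operation reported complete, the client immediately asks the master for a FLUSH-type snapshot of the merged table ({ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }). A minimal sketch of the same request through the public Admin API, assuming a standard client Configuration; the snapshot and table names are copied from the log, everything else is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // SnapshotType.FLUSH corresponds to the "type=FLUSH ttl=0" description in the log.
          admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
              TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"),
              SnapshotType.FLUSH);
        }
      }
    }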
2024-11-13T16:35:47,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T16:35:47,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-11-13T16:35:47,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T16:35:47,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57789bb2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:35:47,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:35:47,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:35:47,081 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:35:47,081 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:35:47,081 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:35:47,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70b5eba0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:35:47,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:35:47,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:35:47,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:35:47,083 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38252, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:35:47,083 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@501f8b1d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:35:47,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:35:47,085 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:35:47,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:35:47,086 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45712, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:35:47,087 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809. 2024-11-13T16:35:47,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:35:47,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:35:47,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:35:47,087 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-13T16:35:47,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77fbba0f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:35:47,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:35:47,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:35:47,089 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:35:47,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:35:47,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:35:47,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7454dcef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:35:47,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:35:47,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:35:47,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:35:47,090 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38272, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:35:47,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bcb9519, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:35:47,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:35:47,092 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:35:47,093 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:35:47,094 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45720, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-13T16:35:47,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2] 2024-11-13T16:35:47,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:35:47,098 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38496, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:35:47,102 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T16:35:47,103 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809. 2024-11-13T16:35:47,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor282.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:35:47,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:35:47,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:35:47,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-13T16:35:47,108 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:35:47,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-13T16:35:47,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=117, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-13T16:35:47,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 117 2024-11-13T16:35:47,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=117 2024-11-13T16:35:47,124 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T16:35:47,125 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T16:35:47,144 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T16:35:47,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742133_1309 (size=216) 2024-11-13T16:35:47,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742133_1309 (size=216) 2024-11-13T16:35:47,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742133_1309 
(size=216) 2024-11-13T16:35:47,211 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T16:35:47,212 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=117, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5b7d129670ad8b90d0358e1bad77535d}] 2024-11-13T16:35:47,215 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=118, ppid=117, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5b7d129670ad8b90d0358e1bad77535d 2024-11-13T16:35:47,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=117 2024-11-13T16:35:47,376 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38437 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=118 2024-11-13T16:35:47,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745265.5b7d129670ad8b90d0358e1bad77535d. 2024-11-13T16:35:47,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.HRegion(2603): Flush status journal for 5b7d129670ad8b90d0358e1bad77535d: 2024-11-13T16:35:47,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745265.5b7d129670ad8b90d0358e1bad77535d. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-11-13T16:35:47,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745265.5b7d129670ad8b90d0358e1bad77535d.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:35:47,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:35:47,380 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5b7d129670ad8b90d0358e1bad77535d/cf/2a8ebc47b62d4630a25e16b0afc477d6.38ce8102dfca11130c2bb14697ca56d3->hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/38ce8102dfca11130c2bb14697ca56d3/cf/2a8ebc47b62d4630a25e16b0afc477d6-top, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5b7d129670ad8b90d0358e1bad77535d/cf/de2929cf776f4703ae5834e45251817d.8ba00fb83d1a1bdde651c03df5ab8404->hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ba00fb83d1a1bdde651c03df5ab8404/cf/de2929cf776f4703ae5834e45251817d-top] hfiles 2024-11-13T16:35:47,380 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5b7d129670ad8b90d0358e1bad77535d/cf/2a8ebc47b62d4630a25e16b0afc477d6.38ce8102dfca11130c2bb14697ca56d3 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:35:47,387 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5b7d129670ad8b90d0358e1bad77535d/cf/de2929cf776f4703ae5834e45251817d.8ba00fb83d1a1bdde651c03df5ab8404 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:35:47,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742134_1310 (size=269) 2024-11-13T16:35:47,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742134_1310 (size=269) 2024-11-13T16:35:47,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742134_1310 (size=269) 2024-11-13T16:35:47,420 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745265.5b7d129670ad8b90d0358e1bad77535d. 
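The region-level snapshot work is done here: the region's flush status journal is recorded, its region-info is stored for the snapshot, and references to the two "-top" hfiles inherited from the merge parents are added to the manifest. The entries that follow show the SnapshotProcedure (pid=117) consolidating and completing; once it does, a client could confirm the snapshot exists with a sketch like the following (assumed, not from the test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListSnapshotsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Once pid=117 reaches SUCCESS, the new snapshot should appear in this list.
          for (SnapshotDescription sd : admin.listSnapshots()) {
            System.out.println(sd.getName() + " table=" + sd.getTableName());
          }
        }
      }
    }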
2024-11-13T16:35:47,420 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=118 2024-11-13T16:35:47,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=118 2024-11-13T16:35:47,421 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 5b7d129670ad8b90d0358e1bad77535d 2024-11-13T16:35:47,422 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=118, ppid=117, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5b7d129670ad8b90d0358e1bad77535d 2024-11-13T16:35:47,425 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=117 2024-11-13T16:35:47,425 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T16:35:47,425 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=117, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5b7d129670ad8b90d0358e1bad77535d in 212 msec 2024-11-13T16:35:47,428 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T16:35:47,429 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T16:35:47,429 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:35:47,430 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:35:47,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=117 2024-11-13T16:35:47,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742135_1311 (size=670) 2024-11-13T16:35:47,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742135_1311 (size=670) 2024-11-13T16:35:47,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742135_1311 (size=670) 2024-11-13T16:35:47,483 INFO [PEWorker-3 {}] 
procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T16:35:47,493 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T16:35:47,493 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:35:47,495 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T16:35:47,495 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 117 2024-11-13T16:35:47,499 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 384 msec 2024-11-13T16:35:47,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:35:47,713 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-11-13T16:35:47,714 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:35:47,714 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-11-13T16:35:47,714 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-13T16:35:47,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure 
is done pid=117 2024-11-13T16:35:47,748 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-13T16:35:47,749 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515747749 2024-11-13T16:35:47,749 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:37215, tgtDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515747749, rawTgtDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515747749, srcFsUri=hdfs://localhost:37215, srcDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:35:47,794 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:37215, inputRoot=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:35:47,794 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-315877880_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515747749, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515747749/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:35:47,796 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 
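From here the test hands off to ExportSnapshot: source root hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce, destination export-test/export-1731515747749, skipTmp=false. A hedged sketch of an equivalent standalone invocation, assuming ExportSnapshot's Tool interface and its documented -snapshot/-copy-to options; only the snapshot name and target URI are taken from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // -snapshot / -copy-to are the documented ExportSnapshot options; the
        // target URI below is the export destination printed in the log.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
            "-copy-to",
            "hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515747749"
        });
        System.exit(rc);
      }
    }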
2024-11-13T16:35:47,815 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515747749/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:35:47,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742136_1312 (size=216) 2024-11-13T16:35:47,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742136_1312 (size=216) 2024-11-13T16:35:47,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742136_1312 (size=216) 2024-11-13T16:35:47,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742137_1313 (size=670) 2024-11-13T16:35:47,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742137_1313 (size=670) 2024-11-13T16:35:47,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742137_1313 (size=670) 2024-11-13T16:35:47,842 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:35:47,842 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:35:47,843 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:35:48,844 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop-6075919675171230149.jar 2024-11-13T16:35:48,845 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:35:48,845 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:35:48,910 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop-16302876187549806514.jar 
2024-11-13T16:35:48,910 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:35:48,911 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:35:48,911 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:35:48,911 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:35:48,911 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:35:48,911 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:35:48,912 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-13T16:35:48,912 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-13T16:35:48,912 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-13T16:35:48,912 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-13T16:35:48,913 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-13T16:35:48,913 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-13T16:35:48,913 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-13T16:35:48,914 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-13T16:35:48,914 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-13T16:35:48,914 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-13T16:35:48,914 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-13T16:35:48,915 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:35:48,915 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:35:48,915 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:35:48,915 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:35:48,916 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:35:48,916 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:35:48,916 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:35:48,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742138_1314 (size=131440) 2024-11-13T16:35:48,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742138_1314 (size=131440) 2024-11-13T16:35:48,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742138_1314 (size=131440) 2024-11-13T16:35:48,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742139_1315 (size=6424544) 2024-11-13T16:35:48,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742139_1315 (size=6424544) 2024-11-13T16:35:48,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742139_1315 (size=6424544) 2024-11-13T16:35:49,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742140_1316 (size=4188619) 2024-11-13T16:35:49,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742140_1316 (size=4188619) 2024-11-13T16:35:49,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742140_1316 (size=4188619) 2024-11-13T16:35:49,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742141_1317 (size=1323991) 2024-11-13T16:35:49,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742141_1317 (size=1323991) 2024-11-13T16:35:49,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742141_1317 (size=1323991) 2024-11-13T16:35:49,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742142_1318 (size=903731) 2024-11-13T16:35:49,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742142_1318 (size=903731) 2024-11-13T16:35:49,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742142_1318 (size=903731) 2024-11-13T16:35:49,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742143_1319 (size=8360083) 2024-11-13T16:35:49,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742143_1319 (size=8360083) 2024-11-13T16:35:49,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742143_1319 (size=8360083) 2024-11-13T16:35:49,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742144_1320 (size=1877034) 2024-11-13T16:35:49,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742144_1320 (size=1877034) 2024-11-13T16:35:49,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742144_1320 (size=1877034) 2024-11-13T16:35:49,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742145_1321 (size=77835) 2024-11-13T16:35:49,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742145_1321 (size=77835) 2024-11-13T16:35:49,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742145_1321 (size=77835) 2024-11-13T16:35:50,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742146_1322 (size=30949) 2024-11-13T16:35:50,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742146_1322 (size=30949) 2024-11-13T16:35:50,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742146_1322 (size=30949) 2024-11-13T16:35:50,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742147_1323 (size=1597327) 2024-11-13T16:35:50,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742147_1323 (size=1597327) 2024-11-13T16:35:50,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742147_1323 (size=1597327) 2024-11-13T16:35:50,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742148_1324 (size=4695811) 2024-11-13T16:35:50,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742148_1324 (size=4695811) 2024-11-13T16:35:50,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742148_1324 (size=4695811) 2024-11-13T16:35:50,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742149_1325 (size=232957) 2024-11-13T16:35:50,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742149_1325 (size=232957) 2024-11-13T16:35:50,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742149_1325 (size=232957) 2024-11-13T16:35:50,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742150_1326 (size=127628) 2024-11-13T16:35:50,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742150_1326 (size=127628) 2024-11-13T16:35:50,262 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742150_1326 (size=127628) 2024-11-13T16:35:50,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742151_1327 (size=20406) 2024-11-13T16:35:50,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742151_1327 (size=20406) 2024-11-13T16:35:50,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742151_1327 (size=20406) 2024-11-13T16:35:50,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742152_1328 (size=5175431) 2024-11-13T16:35:50,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742152_1328 (size=5175431) 2024-11-13T16:35:50,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742152_1328 (size=5175431) 2024-11-13T16:35:50,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742153_1329 (size=440392) 2024-11-13T16:35:50,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742153_1329 (size=440392) 2024-11-13T16:35:50,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742153_1329 (size=440392) 2024-11-13T16:35:50,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742154_1330 (size=217634) 2024-11-13T16:35:50,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742154_1330 (size=217634) 2024-11-13T16:35:50,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742154_1330 (size=217634) 2024-11-13T16:35:50,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742155_1331 (size=1832290) 2024-11-13T16:35:50,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742155_1331 (size=1832290) 2024-11-13T16:35:50,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742155_1331 (size=1832290) 2024-11-13T16:35:50,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742156_1332 (size=322274) 2024-11-13T16:35:50,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742156_1332 (size=322274) 2024-11-13T16:35:50,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742156_1332 (size=322274) 2024-11-13T16:35:50,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742157_1333 (size=503880) 2024-11-13T16:35:50,383 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742157_1333 (size=503880) 2024-11-13T16:35:50,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742157_1333 (size=503880) 2024-11-13T16:35:50,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742158_1334 (size=29229) 2024-11-13T16:35:50,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742158_1334 (size=29229) 2024-11-13T16:35:50,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742158_1334 (size=29229) 2024-11-13T16:35:50,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742159_1335 (size=24096) 2024-11-13T16:35:50,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742159_1335 (size=24096) 2024-11-13T16:35:50,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742159_1335 (size=24096) 2024-11-13T16:35:50,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742160_1336 (size=111872) 2024-11-13T16:35:50,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742160_1336 (size=111872) 2024-11-13T16:35:50,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742160_1336 (size=111872) 2024-11-13T16:35:50,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742161_1337 (size=45609) 2024-11-13T16:35:50,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742161_1337 (size=45609) 2024-11-13T16:35:50,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742161_1337 (size=45609) 2024-11-13T16:35:50,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742162_1338 (size=136454) 2024-11-13T16:35:50,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742162_1338 (size=136454) 2024-11-13T16:35:50,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742162_1338 (size=136454) 2024-11-13T16:35:50,448 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
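The long run of "For class ..., using jar ..." DEBUG entries above comes from TableMapReduceUtil resolving the jar that backs each dependency class before the export job is submitted, and the JobResourceUploader WARN simply notes that the submitted job carries no job jar of its own. For a user-written MapReduce-over-HBase job, the usual pattern is to set the job jar and ship the HBase dependencies explicitly; a minimal sketch in which the class and job names are hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class JobJarSetupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "hbase-mr-example");
        // Setting the job jar is what silences the JobResourceUploader warning
        // for user-submitted jobs.
        job.setJarByClass(JobJarSetupSketch.class);
        // Ships the HBase client jars with the job; the same mechanism produces
        // the "For class ..., using jar ..." DEBUG entries seen above.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }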
2024-11-13T16:35:50,451 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-11-13T16:35:50,456 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=9.7 K 2024-11-13T16:35:50,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742163_1339 (size=378) 2024-11-13T16:35:50,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742163_1339 (size=378) 2024-11-13T16:35:50,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742163_1339 (size=378) 2024-11-13T16:35:50,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742164_1340 (size=15) 2024-11-13T16:35:50,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742164_1340 (size=15) 2024-11-13T16:35:50,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742164_1340 (size=15) 2024-11-13T16:35:50,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742165_1341 (size=303787) 2024-11-13T16:35:50,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742165_1341 (size=303787) 2024-11-13T16:35:50,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742165_1341 (size=303787) 2024-11-13T16:35:50,551 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T16:35:50,551 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-13T16:35:50,743 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0006_000001 (auth:SIMPLE) from 127.0.0.1:60090 2024-11-13T16:35:52,129 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T16:36:00,776 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0006_000001 (auth:SIMPLE) from 127.0.0.1:47584 2024-11-13T16:36:01,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742166_1342 (size=349437) 2024-11-13T16:36:01,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742166_1342 (size=349437) 2024-11-13T16:36:01,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742166_1342 (size=349437) 2024-11-13T16:36:03,279 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0006_000001 (auth:SIMPLE) from 127.0.0.1:58822 2024-11-13T16:36:05,907 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T16:36:07,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742167_1343 (size=4945) 2024-11-13T16:36:07,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742167_1343 (size=4945) 2024-11-13T16:36:07,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742167_1343 (size=4945) 2024-11-13T16:36:07,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742168_1344 (size=4945) 2024-11-13T16:36:07,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742168_1344 (size=4945) 2024-11-13T16:36:07,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742168_1344 (size=4945) 2024-11-13T16:36:08,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742169_1345 (size=17474) 2024-11-13T16:36:08,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742169_1345 (size=17474) 2024-11-13T16:36:08,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742169_1345 (size=17474) 2024-11-13T16:36:08,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742170_1346 (size=482) 2024-11-13T16:36:08,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742170_1346 (size=482) 2024-11-13T16:36:08,063 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742170_1346 (size=482) 2024-11-13T16:36:08,101 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_2/usercache/jenkins/appcache/application_1731515545832_0006/container_1731515545832_0006_01_000002/launch_container.sh] 2024-11-13T16:36:08,101 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_2/usercache/jenkins/appcache/application_1731515545832_0006/container_1731515545832_0006_01_000002/container_tokens] 2024-11-13T16:36:08,101 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_2/usercache/jenkins/appcache/application_1731515545832_0006/container_1731515545832_0006_01_000002/sysfs] 2024-11-13T16:36:08,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742171_1347 (size=17474) 2024-11-13T16:36:08,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742171_1347 (size=17474) 2024-11-13T16:36:08,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742171_1347 (size=17474) 2024-11-13T16:36:08,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742172_1348 (size=349437) 2024-11-13T16:36:08,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742172_1348 (size=349437) 2024-11-13T16:36:08,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742172_1348 (size=349437) 2024-11-13T16:36:08,685 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0006_000001 (auth:SIMPLE) from 127.0.0.1:36834 2024-11-13T16:36:09,819 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-13T16:36:09,820 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
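The "Loading Snapshot ... hfile list", "Finalize the Snapshot Export" and "Verify the exported snapshot's expiration status and integrity" entries above come from org.apache.hadoop.hbase.snapshot.ExportSnapshot, which this test drives as a MapReduce job. Outside the test harness a comparable export could be launched through ToolRunner; a minimal sketch, assuming the snapshot name from this log and an illustrative destination URI (flag names follow the HBase reference guide for the ExportSnapshot tool):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotRunner {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Snapshot name taken from the log above; the -copy-to URI is illustrative.
            int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
                "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
                "-copy-to", "hdfs://backup-namenode:8020/hbase-backup"
            });
            System.exit(rc);
        }
    }

The equivalent command-line form documented in the reference guide is "hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <uri>".
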
2024-11-13T16:36:09,827 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:09,827 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-13T16:36:09,828 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-13T16:36:09,828 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-315877880_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:09,828 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-13T16:36:09,828 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-13T16:36:09,829 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-315877880_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515747749/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515747749/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:09,829 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515747749/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-13T16:36:09,829 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515747749/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-13T16:36:09,844 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:09,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:09,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-13T16:36:09,850 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515769850"}]},"ts":"1731515769850"} 2024-11-13T16:36:09,852 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated 
tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-11-13T16:36:09,852 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-11-13T16:36:09,853 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-11-13T16:36:09,855 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5b7d129670ad8b90d0358e1bad77535d, UNASSIGN}] 2024-11-13T16:36:09,856 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=120, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5b7d129670ad8b90d0358e1bad77535d, UNASSIGN 2024-11-13T16:36:09,857 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=121 updating hbase:meta row=5b7d129670ad8b90d0358e1bad77535d, regionState=CLOSING, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:36:09,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=121, ppid=120, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5b7d129670ad8b90d0358e1bad77535d, UNASSIGN because future has completed 2024-11-13T16:36:09,859 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T16:36:09,859 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=122, ppid=121, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5b7d129670ad8b90d0358e1bad77535d, server=f8f85b0bb1d8,38437,1731515528789}] 2024-11-13T16:36:09,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-13T16:36:10,013 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] handler.UnassignRegionHandler(122): Close 5b7d129670ad8b90d0358e1bad77535d 2024-11-13T16:36:10,013 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T16:36:10,013 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1722): Closing 5b7d129670ad8b90d0358e1bad77535d, disabling compactions & flushes 2024-11-13T16:36:10,013 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745265.5b7d129670ad8b90d0358e1bad77535d. 2024-11-13T16:36:10,013 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745265.5b7d129670ad8b90d0358e1bad77535d. 
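The pid=119..122 procedures above, and the pid=123 DeleteTableProcedure that follows, are the master-side work triggered by the client's disable/delete requests logged as HMaster$13 (disable) and HMaster$5 (delete). A minimal sketch of the client-side Admin calls that produce this sequence, assuming a reachable cluster configuration; the table name is taken from the log, everything else is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            TableName table =
                TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // disableTable drives the DisableTableProcedure seen here:
                // table state DISABLING -> DISABLED, regions unassigned and closed.
                if (admin.isTableEnabled(table)) {
                    admin.disableTable(table);
                }
                // deleteTable drives the DeleteTableProcedure that follows in the log:
                // region directories are archived by HFileArchiver, then the table is
                // removed from hbase:meta and its ACL entry is cleaned up.
                admin.deleteTable(table);
            }
        }
    }
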
2024-11-13T16:36:10,014 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745265.5b7d129670ad8b90d0358e1bad77535d. after waiting 0 ms 2024-11-13T16:36:10,014 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745265.5b7d129670ad8b90d0358e1bad77535d. 2024-11-13T16:36:10,021 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5b7d129670ad8b90d0358e1bad77535d/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-11-13T16:36:10,022 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:36:10,022 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745265.5b7d129670ad8b90d0358e1bad77535d. 2024-11-13T16:36:10,022 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1676): Region close journal for 5b7d129670ad8b90d0358e1bad77535d: Waiting for close lock at 1731515770013Running coprocessor pre-close hooks at 1731515770013Disabling compacts and flushes for region at 1731515770013Disabling writes for close at 1731515770014 (+1 ms)Writing region close event to WAL at 1731515770015 (+1 ms)Running coprocessor post-close hooks at 1731515770022 (+7 ms)Closed at 1731515770022 2024-11-13T16:36:10,026 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] handler.UnassignRegionHandler(157): Closed 5b7d129670ad8b90d0358e1bad77535d 2024-11-13T16:36:10,028 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=121 updating hbase:meta row=5b7d129670ad8b90d0358e1bad77535d, regionState=CLOSED 2024-11-13T16:36:10,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=122, ppid=121, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5b7d129670ad8b90d0358e1bad77535d, server=f8f85b0bb1d8,38437,1731515528789 because future has completed 2024-11-13T16:36:10,036 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=122, resume processing ppid=121 2024-11-13T16:36:10,036 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, ppid=121, state=SUCCESS, hasLock=false; CloseRegionProcedure 5b7d129670ad8b90d0358e1bad77535d, server=f8f85b0bb1d8,38437,1731515528789 in 174 msec 2024-11-13T16:36:10,038 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=121, resume processing ppid=120 2024-11-13T16:36:10,038 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=120, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5b7d129670ad8b90d0358e1bad77535d, UNASSIGN in 181 msec 2024-11-13T16:36:10,045 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished 
subprocedure pid=120, resume processing ppid=119 2024-11-13T16:36:10,046 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 190 msec 2024-11-13T16:36:10,047 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515770047"}]},"ts":"1731515770047"} 2024-11-13T16:36:10,049 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-11-13T16:36:10,049 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-11-13T16:36:10,051 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 205 msec 2024-11-13T16:36:10,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-13T16:36:10,168 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-13T16:36:10,169 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:10,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=123, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:10,171 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=123, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:10,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:10,171 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=123, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:10,174 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38523 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:10,175 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5b7d129670ad8b90d0358e1bad77535d 2024-11-13T16:36:10,175 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ba00fb83d1a1bdde651c03df5ab8404 2024-11-13T16:36:10,175 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/38ce8102dfca11130c2bb14697ca56d3 2024-11-13T16:36:10,177 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5b7d129670ad8b90d0358e1bad77535d/cf, FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5b7d129670ad8b90d0358e1bad77535d/recovered.edits] 2024-11-13T16:36:10,177 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ba00fb83d1a1bdde651c03df5ab8404/cf, FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ba00fb83d1a1bdde651c03df5ab8404/recovered.edits] 2024-11-13T16:36:10,177 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/38ce8102dfca11130c2bb14697ca56d3/cf, FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/38ce8102dfca11130c2bb14697ca56d3/recovered.edits] 2024-11-13T16:36:10,181 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/38ce8102dfca11130c2bb14697ca56d3/cf/2a8ebc47b62d4630a25e16b0afc477d6 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/38ce8102dfca11130c2bb14697ca56d3/cf/2a8ebc47b62d4630a25e16b0afc477d6 2024-11-13T16:36:10,181 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ba00fb83d1a1bdde651c03df5ab8404/cf/de2929cf776f4703ae5834e45251817d to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ba00fb83d1a1bdde651c03df5ab8404/cf/de2929cf776f4703ae5834e45251817d 2024-11-13T16:36:10,181 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5b7d129670ad8b90d0358e1bad77535d/cf/2a8ebc47b62d4630a25e16b0afc477d6.38ce8102dfca11130c2bb14697ca56d3 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5b7d129670ad8b90d0358e1bad77535d/cf/2a8ebc47b62d4630a25e16b0afc477d6.38ce8102dfca11130c2bb14697ca56d3 2024-11-13T16:36:10,186 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5b7d129670ad8b90d0358e1bad77535d/cf/de2929cf776f4703ae5834e45251817d.8ba00fb83d1a1bdde651c03df5ab8404 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5b7d129670ad8b90d0358e1bad77535d/cf/de2929cf776f4703ae5834e45251817d.8ba00fb83d1a1bdde651c03df5ab8404 2024-11-13T16:36:10,190 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/38ce8102dfca11130c2bb14697ca56d3/recovered.edits/8.seqid to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/38ce8102dfca11130c2bb14697ca56d3/recovered.edits/8.seqid 2024-11-13T16:36:10,193 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/38ce8102dfca11130c2bb14697ca56d3 2024-11-13T16:36:10,193 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ba00fb83d1a1bdde651c03df5ab8404/recovered.edits/8.seqid to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ba00fb83d1a1bdde651c03df5ab8404/recovered.edits/8.seqid 2024-11-13T16:36:10,193 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5b7d129670ad8b90d0358e1bad77535d/recovered.edits/12.seqid to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5b7d129670ad8b90d0358e1bad77535d/recovered.edits/12.seqid 2024-11-13T16:36:10,194 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ba00fb83d1a1bdde651c03df5ab8404 2024-11-13T16:36:10,194 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5b7d129670ad8b90d0358e1bad77535d 2024-11-13T16:36:10,194 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-11-13T16:36:10,208 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=123, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:10,216 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-11-13T16:36:10,219 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): 
Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-11-13T16:36:10,221 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=123, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:10,222 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-11-13T16:36:10,222 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745265.5b7d129670ad8b90d0358e1bad77535d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731515770222"}]},"ts":"9223372036854775807"} 2024-11-13T16:36:10,226 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-11-13T16:36:10,226 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 5b7d129670ad8b90d0358e1bad77535d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745265.5b7d129670ad8b90d0358e1bad77535d.', STARTKEY => '', ENDKEY => ''}] 2024-11-13T16:36:10,226 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 2024-11-13T16:36:10,226 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731515770226"}]},"ts":"9223372036854775807"} 2024-11-13T16:36:10,233 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-11-13T16:36:10,239 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=123, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:10,246 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 70 msec 2024-11-13T16:36:10,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:10,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:10,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:10,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:10,284 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-13T16:36:10,284 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-13T16:36:10,284 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-13T16:36:10,286 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-13T16:36:10,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:10,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:10,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:10,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:10,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:10,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:10,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:10,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:10,341 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:10,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=123 2024-11-13T16:36:10,342 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions 
cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:10,342 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:10,342 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-13T16:36:10,343 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:36:10,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=124, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:36:10,345 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:10,346 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:10,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=124 2024-11-13T16:36:10,347 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515770347"}]},"ts":"1731515770347"} 2024-11-13T16:36:10,349 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-11-13T16:36:10,349 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-11-13T16:36:10,350 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-11-13T16:36:10,363 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=d8bcb5a49f7b9b9ccf99cda1f05a4ae8, UNASSIGN}, {pid=127, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0b6fd4461bcb296079228f893028756c, UNASSIGN}] 2024-11-13T16:36:10,365 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion, region=0b6fd4461bcb296079228f893028756c, UNASSIGN 2024-11-13T16:36:10,365 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=d8bcb5a49f7b9b9ccf99cda1f05a4ae8, UNASSIGN 2024-11-13T16:36:10,367 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=0b6fd4461bcb296079228f893028756c, regionState=CLOSING, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:36:10,367 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=126 updating hbase:meta row=d8bcb5a49f7b9b9ccf99cda1f05a4ae8, regionState=CLOSING, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:36:10,369 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0b6fd4461bcb296079228f893028756c, UNASSIGN because future has completed 2024-11-13T16:36:10,370 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T16:36:10,370 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=128, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0b6fd4461bcb296079228f893028756c, server=f8f85b0bb1d8,38523,1731515529292}] 2024-11-13T16:36:10,371 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=d8bcb5a49f7b9b9ccf99cda1f05a4ae8, UNASSIGN because future has completed 2024-11-13T16:36:10,372 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T16:36:10,372 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=126, state=RUNNABLE, hasLock=false; CloseRegionProcedure d8bcb5a49f7b9b9ccf99cda1f05a4ae8, server=f8f85b0bb1d8,38437,1731515528789}] 2024-11-13T16:36:10,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=124 2024-11-13T16:36:10,524 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] handler.UnassignRegionHandler(122): Close 0b6fd4461bcb296079228f893028756c 2024-11-13T16:36:10,524 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T16:36:10,524 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1722): Closing 0b6fd4461bcb296079228f893028756c, disabling compactions & flushes 2024-11-13T16:36:10,524 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c. 
2024-11-13T16:36:10,524 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c. 2024-11-13T16:36:10,524 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c. after waiting 0 ms 2024-11-13T16:36:10,524 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c. 2024-11-13T16:36:10,526 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close d8bcb5a49f7b9b9ccf99cda1f05a4ae8 2024-11-13T16:36:10,526 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T16:36:10,526 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing d8bcb5a49f7b9b9ccf99cda1f05a4ae8, disabling compactions & flushes 2024-11-13T16:36:10,526 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8. 2024-11-13T16:36:10,526 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8. 2024-11-13T16:36:10,526 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8. after waiting 0 ms 2024-11-13T16:36:10,526 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8. 
2024-11-13T16:36:10,530 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/d8bcb5a49f7b9b9ccf99cda1f05a4ae8/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T16:36:10,530 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b6fd4461bcb296079228f893028756c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T16:36:10,530 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:36:10,530 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:36:10,530 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8. 2024-11-13T16:36:10,530 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c. 2024-11-13T16:36:10,530 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for d8bcb5a49f7b9b9ccf99cda1f05a4ae8: Waiting for close lock at 1731515770526Running coprocessor pre-close hooks at 1731515770526Disabling compacts and flushes for region at 1731515770526Disabling writes for close at 1731515770526Writing region close event to WAL at 1731515770527 (+1 ms)Running coprocessor post-close hooks at 1731515770530 (+3 ms)Closed at 1731515770530 2024-11-13T16:36:10,530 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1676): Region close journal for 0b6fd4461bcb296079228f893028756c: Waiting for close lock at 1731515770524Running coprocessor pre-close hooks at 1731515770524Disabling compacts and flushes for region at 1731515770524Disabling writes for close at 1731515770524Writing region close event to WAL at 1731515770525 (+1 ms)Running coprocessor post-close hooks at 1731515770530 (+5 ms)Closed at 1731515770530 2024-11-13T16:36:10,533 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed d8bcb5a49f7b9b9ccf99cda1f05a4ae8 2024-11-13T16:36:10,533 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=126 updating hbase:meta row=d8bcb5a49f7b9b9ccf99cda1f05a4ae8, regionState=CLOSED 2024-11-13T16:36:10,533 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] handler.UnassignRegionHandler(157): Closed 0b6fd4461bcb296079228f893028756c 2024-11-13T16:36:10,534 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=0b6fd4461bcb296079228f893028756c, regionState=CLOSED 2024-11-13T16:36:10,535 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=129, ppid=126, state=RUNNABLE, hasLock=false; CloseRegionProcedure d8bcb5a49f7b9b9ccf99cda1f05a4ae8, server=f8f85b0bb1d8,38437,1731515528789 because future has completed 2024-11-13T16:36:10,536 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0b6fd4461bcb296079228f893028756c, server=f8f85b0bb1d8,38523,1731515529292 because future has completed 2024-11-13T16:36:10,539 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=126 2024-11-13T16:36:10,540 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=128, resume processing ppid=127 2024-11-13T16:36:10,540 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=126, state=SUCCESS, hasLock=false; CloseRegionProcedure d8bcb5a49f7b9b9ccf99cda1f05a4ae8, server=f8f85b0bb1d8,38437,1731515528789 in 165 msec 2024-11-13T16:36:10,540 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=127, state=SUCCESS, hasLock=false; CloseRegionProcedure 0b6fd4461bcb296079228f893028756c, server=f8f85b0bb1d8,38523,1731515529292 in 167 msec 2024-11-13T16:36:10,541 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=d8bcb5a49f7b9b9ccf99cda1f05a4ae8, UNASSIGN in 176 msec 2024-11-13T16:36:10,542 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=127, resume processing ppid=125 2024-11-13T16:36:10,542 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=125, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=0b6fd4461bcb296079228f893028756c, UNASSIGN in 177 msec 2024-11-13T16:36:10,545 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=125, resume processing ppid=124 2024-11-13T16:36:10,545 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, ppid=124, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 193 msec 2024-11-13T16:36:10,547 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515770546"}]},"ts":"1731515770546"} 2024-11-13T16:36:10,548 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-11-13T16:36:10,548 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-11-13T16:36:10,551 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 207 msec 2024-11-13T16:36:10,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=124 2024-11-13T16:36:10,668 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-13T16:36:10,669 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:36:10,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=130, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:36:10,671 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=130, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:36:10,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:36:10,671 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=130, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:36:10,673 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38523 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:36:10,675 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/d8bcb5a49f7b9b9ccf99cda1f05a4ae8 2024-11-13T16:36:10,675 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b6fd4461bcb296079228f893028756c 2024-11-13T16:36:10,680 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/d8bcb5a49f7b9b9ccf99cda1f05a4ae8/cf, FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/d8bcb5a49f7b9b9ccf99cda1f05a4ae8/recovered.edits] 2024-11-13T16:36:10,680 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b6fd4461bcb296079228f893028756c/cf, FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b6fd4461bcb296079228f893028756c/recovered.edits] 2024-11-13T16:36:10,684 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/d8bcb5a49f7b9b9ccf99cda1f05a4ae8/cf/13ff8e10a15d42c4b06a2fddebcf4a22 to 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/d8bcb5a49f7b9b9ccf99cda1f05a4ae8/cf/13ff8e10a15d42c4b06a2fddebcf4a22 2024-11-13T16:36:10,684 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b6fd4461bcb296079228f893028756c/cf/097e4ad3d4bf4153b12637323f4c0170 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b6fd4461bcb296079228f893028756c/cf/097e4ad3d4bf4153b12637323f4c0170 2024-11-13T16:36:10,690 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b6fd4461bcb296079228f893028756c/recovered.edits/9.seqid to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b6fd4461bcb296079228f893028756c/recovered.edits/9.seqid 2024-11-13T16:36:10,691 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/d8bcb5a49f7b9b9ccf99cda1f05a4ae8/recovered.edits/9.seqid to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/d8bcb5a49f7b9b9ccf99cda1f05a4ae8/recovered.edits/9.seqid 2024-11-13T16:36:10,691 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/0b6fd4461bcb296079228f893028756c 2024-11-13T16:36:10,691 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithMergeRegion/d8bcb5a49f7b9b9ccf99cda1f05a4ae8 2024-11-13T16:36:10,691 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-11-13T16:36:10,692 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-11-13T16:36:10,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:36:10,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:36:10,696 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf] 2024-11-13T16:36:10,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:36:10,699 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-13T16:36:10,699 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-13T16:36:10,700 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-13T16:36:10,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:36:10,708 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241113979e797ed24f4965bac2ba6b2dfcdc79_0b6fd4461bcb296079228f893028756c to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241113979e797ed24f4965bac2ba6b2dfcdc79_0b6fd4461bcb296079228f893028756c 2024-11-13T16:36:10,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:36:10,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:10,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:10,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:36:10,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:10,709 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions 
cache from testtb-testExportFileSystemStateWithMergeRegion with data null 2024-11-13T16:36:10,709 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-13T16:36:10,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:36:10,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:10,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=130 2024-11-13T16:36:10,712 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241113c0d4c6e563174c06adaa5b077a142ffc_d8bcb5a49f7b9b9ccf99cda1f05a4ae8 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241113c0d4c6e563174c06adaa5b077a142ffc_d8bcb5a49f7b9b9ccf99cda1f05a4ae8 2024-11-13T16:36:10,713 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-11-13T16:36:10,717 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=130, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:36:10,720 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-11-13T16:36:10,723 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-11-13T16:36:10,728 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=130, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:36:10,728 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 
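Note on the records above: the DISABLE and DELETE procedures (pid=124 and pid=130) are driven by ordinary HBase client Admin calls from the test. The following is a minimal sketch of that client side, not code taken from the test source; it assumes a standard HBase 2.x client on the classpath, and only the table and snapshot names are copied from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableAndSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
      if (admin.isTableEnabled(table)) {
        // Master runs DisableTableProcedure: UNASSIGN + CloseRegionProcedure per region.
        admin.disableTable(table);
      }
      // Master runs DeleteTableProcedure: archive HFiles, clean hbase:meta, drop ACL entries.
      admin.deleteTable(table);
      // Snapshot cleanup, matching the 'delete name: "..."' requests a few records further on.
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1");
    }
  }
}

The HFileArchiver records above correspond to the DeleteTableProcedure step that moves region and MOB store files under the archive/ directory rather than deleting them outright.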
2024-11-13T16:36:10,728 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731515770728"}]},"ts":"9223372036854775807"} 2024-11-13T16:36:10,728 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731515770728"}]},"ts":"9223372036854775807"} 2024-11-13T16:36:10,731 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-13T16:36:10,731 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => d8bcb5a49f7b9b9ccf99cda1f05a4ae8, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1731515741991.d8bcb5a49f7b9b9ccf99cda1f05a4ae8.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 0b6fd4461bcb296079228f893028756c, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1731515741991.0b6fd4461bcb296079228f893028756c.', STARTKEY => '1', ENDKEY => ''}] 2024-11-13T16:36:10,731 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 2024-11-13T16:36:10,731 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731515770731"}]},"ts":"9223372036854775807"} 2024-11-13T16:36:10,734 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-11-13T16:36:10,739 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=130, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:36:10,742 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 70 msec 2024-11-13T16:36:10,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=130 2024-11-13T16:36:10,820 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:36:10,820 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-13T16:36:10,844 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-13T16:36:10,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T16:36:10,850 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-13T16:36:10,854 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-13T16:36:10,856 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-11-13T16:36:10,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:10,911 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=799 (was 794) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-315877880_22 at /127.0.0.1:49482 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38905 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-456362114_1 at /127.0.0.1:49456 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-315877880_22 at /127.0.0.1:58940 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 99033) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-4875 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-315877880_22 at /127.0.0.1:33734 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (565235355) connection to localhost/127.0.0.1:38905 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=804 (was 798) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1385 (was 1436), ProcessCount=17 (was 19), AvailableMemoryMB=2712 (was 1106) - AvailableMemoryMB LEAK? - 2024-11-13T16:36:10,911 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=799 is superior to 500 2024-11-13T16:36:10,935 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=799, OpenFileDescriptor=804, MaxFileDescriptor=1048576, SystemLoadAverage=1385, ProcessCount=17, AvailableMemoryMB=2711 2024-11-13T16:36:10,935 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=799 is superior to 500 2024-11-13T16:36:10,936 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T16:36:10,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-13T16:36:10,938 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T16:36:10,938 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 131 2024-11-13T16:36:10,939 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T16:36:10,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-13T16:36:10,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742173_1349 (size=443) 2024-11-13T16:36:10,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742173_1349 (size=443) 2024-11-13T16:36:10,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742173_1349 (size=443) 2024-11-13T16:36:10,947 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 22a119933ccc4c3eb35919c2f4b07619, NAME => 'testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:36:10,947 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 5b965f308a04d32d777100e414299207, NAME => 'testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:36:10,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742175_1351 (size=68) 2024-11-13T16:36:10,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742175_1351 (size=68) 2024-11-13T16:36:10,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742175_1351 (size=68) 2024-11-13T16:36:10,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742174_1350 (size=68) 2024-11-13T16:36:10,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742174_1350 (size=68) 2024-11-13T16:36:10,957 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:36:10,958 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 5b965f308a04d32d777100e414299207, disabling compactions & flushes 2024-11-13T16:36:10,958 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207. 2024-11-13T16:36:10,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742174_1350 (size=68) 2024-11-13T16:36:10,958 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207. 
2024-11-13T16:36:10,958 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207. after waiting 0 ms 2024-11-13T16:36:10,958 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207. 2024-11-13T16:36:10,958 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207. 2024-11-13T16:36:10,958 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 5b965f308a04d32d777100e414299207: Waiting for close lock at 1731515770957Disabling compacts and flushes for region at 1731515770957Disabling writes for close at 1731515770958 (+1 ms)Writing region close event to WAL at 1731515770958Closed at 1731515770958 2024-11-13T16:36:10,958 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:36:10,958 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing 22a119933ccc4c3eb35919c2f4b07619, disabling compactions & flushes 2024-11-13T16:36:10,958 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619. 2024-11-13T16:36:10,958 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619. 2024-11-13T16:36:10,958 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619. after waiting 0 ms 2024-11-13T16:36:10,958 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619. 2024-11-13T16:36:10,958 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619. 
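The CREATE request logged above specifies a single MOB-enabled column family ('cf' with IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') and two regions split at key '1'. A hedged client-side sketch of an equivalent request using the HBase 2.x descriptor builders; class and variable names here are illustrative and not taken from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
          .setMobEnabled(true)   // IS_MOB => 'true'
          .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every cell goes to the MOB store
          .setMaxVersions(1)     // VERSIONS => '1'
          .build();
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportExpiredSnapshot"))
          .setColumnFamily(cf)
          .build();
      // One split key yields the two regions ('' -> '1' and '1' -> '') created in the log.
      byte[][] splitKeys = { Bytes.toBytes("1") };
      admin.createTable(td, splitKeys);   // master runs CreateTableProcedure as logged
    }
  }
}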
2024-11-13T16:36:10,958 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for 22a119933ccc4c3eb35919c2f4b07619: Waiting for close lock at 1731515770958Disabling compacts and flushes for region at 1731515770958Disabling writes for close at 1731515770958Writing region close event to WAL at 1731515770958Closed at 1731515770958 2024-11-13T16:36:10,960 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T16:36:10,960 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1731515770960"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515770960"}]},"ts":"1731515770960"} 2024-11-13T16:36:10,960 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1731515770960"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515770960"}]},"ts":"1731515770960"} 2024-11-13T16:36:10,963 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-13T16:36:10,963 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T16:36:10,964 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515770963"}]},"ts":"1731515770963"} 2024-11-13T16:36:10,965 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-13T16:36:10,965 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {f8f85b0bb1d8=0} racks are {/default-rack=0} 2024-11-13T16:36:10,967 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T16:36:10,967 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T16:36:10,967 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T16:36:10,967 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T16:36:10,967 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T16:36:10,967 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T16:36:10,967 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T16:36:10,967 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T16:36:10,967 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T16:36:10,967 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T16:36:10,967 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=22a119933ccc4c3eb35919c2f4b07619, ASSIGN}, {pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=5b965f308a04d32d777100e414299207, ASSIGN}] 2024-11-13T16:36:10,969 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=5b965f308a04d32d777100e414299207, ASSIGN 2024-11-13T16:36:10,969 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=22a119933ccc4c3eb35919c2f4b07619, ASSIGN 2024-11-13T16:36:10,970 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=5b965f308a04d32d777100e414299207, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,38523,1731515529292; forceNewPlan=false, retain=false 2024-11-13T16:36:10,970 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=22a119933ccc4c3eb35919c2f4b07619, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,38437,1731515528789; forceNewPlan=false, retain=false 2024-11-13T16:36:11,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-13T16:36:11,120 INFO [f8f85b0bb1d8:45809 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
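Once the two ASSIGN subprocedures above complete, the regions' locations are recorded in hbase:meta and can be read back through a RegionLocator. A small sketch under the same HBase 2.x client assumptions (the class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocationCheckSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(table)) {
      // Prints each encoded region name and its hosting server, e.g.
      // 22a119933ccc4c3eb35919c2f4b07619 -> f8f85b0bb1d8,38437,1731515528789
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}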
2024-11-13T16:36:11,120 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=132 updating hbase:meta row=22a119933ccc4c3eb35919c2f4b07619, regionState=OPENING, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:36:11,120 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=5b965f308a04d32d777100e414299207, regionState=OPENING, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:36:11,122 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=22a119933ccc4c3eb35919c2f4b07619, ASSIGN because future has completed 2024-11-13T16:36:11,122 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=134, ppid=132, state=RUNNABLE, hasLock=false; OpenRegionProcedure 22a119933ccc4c3eb35919c2f4b07619, server=f8f85b0bb1d8,38437,1731515528789}] 2024-11-13T16:36:11,123 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=5b965f308a04d32d777100e414299207, ASSIGN because future has completed 2024-11-13T16:36:11,123 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5b965f308a04d32d777100e414299207, server=f8f85b0bb1d8,38523,1731515529292}] 2024-11-13T16:36:11,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-13T16:36:11,278 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619. 2024-11-13T16:36:11,278 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7752): Opening region: {ENCODED => 22a119933ccc4c3eb35919c2f4b07619, NAME => 'testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619.', STARTKEY => '', ENDKEY => '1'} 2024-11-13T16:36:11,278 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619. service=AccessControlService 2024-11-13T16:36:11,279 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T16:36:11,279 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207. 
2024-11-13T16:36:11,279 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 22a119933ccc4c3eb35919c2f4b07619 2024-11-13T16:36:11,279 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:36:11,279 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => 5b965f308a04d32d777100e414299207, NAME => 'testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207.', STARTKEY => '1', ENDKEY => ''} 2024-11-13T16:36:11,279 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7794): checking encryption for 22a119933ccc4c3eb35919c2f4b07619 2024-11-13T16:36:11,279 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7797): checking classloading for 22a119933ccc4c3eb35919c2f4b07619 2024-11-13T16:36:11,279 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207. service=AccessControlService 2024-11-13T16:36:11,279 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
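The "Registered coprocessor service ... service=AccessControlService" and "System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded" records reflect the secure mini-cluster loading the access-control coprocessor into every region as it opens. As a hedged illustration only, using standard HBase security property names rather than anything read from this test's configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SecureClusterConfSketch {
  // Returns a Configuration that loads AccessController on master, region servers
  // and every region, which is what produces the coprocessor records in the log.
  static Configuration withAccessController() {
    Configuration conf = HBaseConfiguration.create();
    String ac = "org.apache.hadoop.hbase.security.access.AccessController";
    conf.set("hbase.coprocessor.master.classes", ac);
    conf.set("hbase.coprocessor.region.classes", ac);
    conf.set("hbase.coprocessor.regionserver.classes", ac);
    conf.setBoolean("hbase.security.authorization", true);
    return conf;
  }
}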
2024-11-13T16:36:11,279 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 5b965f308a04d32d777100e414299207 2024-11-13T16:36:11,280 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:36:11,280 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for 5b965f308a04d32d777100e414299207 2024-11-13T16:36:11,280 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7797): checking classloading for 5b965f308a04d32d777100e414299207 2024-11-13T16:36:11,281 INFO [StoreOpener-22a119933ccc4c3eb35919c2f4b07619-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 22a119933ccc4c3eb35919c2f4b07619 2024-11-13T16:36:11,281 INFO [StoreOpener-5b965f308a04d32d777100e414299207-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5b965f308a04d32d777100e414299207 2024-11-13T16:36:11,282 INFO [StoreOpener-22a119933ccc4c3eb35919c2f4b07619-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 22a119933ccc4c3eb35919c2f4b07619 columnFamilyName cf 2024-11-13T16:36:11,282 INFO [StoreOpener-5b965f308a04d32d777100e414299207-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5b965f308a04d32d777100e414299207 columnFamilyName cf 2024-11-13T16:36:11,283 DEBUG [StoreOpener-5b965f308a04d32d777100e414299207-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:36:11,284 DEBUG [StoreOpener-22a119933ccc4c3eb35919c2f4b07619-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:36:11,284 INFO [StoreOpener-5b965f308a04d32d777100e414299207-1 {}] regionserver.HStore(327): Store=5b965f308a04d32d777100e414299207/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:36:11,284 INFO [StoreOpener-22a119933ccc4c3eb35919c2f4b07619-1 {}] regionserver.HStore(327): Store=22a119933ccc4c3eb35919c2f4b07619/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:36:11,285 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for 5b965f308a04d32d777100e414299207 2024-11-13T16:36:11,285 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1038): replaying wal for 22a119933ccc4c3eb35919c2f4b07619 2024-11-13T16:36:11,286 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/5b965f308a04d32d777100e414299207 2024-11-13T16:36:11,286 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/22a119933ccc4c3eb35919c2f4b07619 2024-11-13T16:36:11,286 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/5b965f308a04d32d777100e414299207 2024-11-13T16:36:11,286 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/22a119933ccc4c3eb35919c2f4b07619 2024-11-13T16:36:11,286 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for 5b965f308a04d32d777100e414299207 2024-11-13T16:36:11,286 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1048): stopping wal replay for 22a119933ccc4c3eb35919c2f4b07619 2024-11-13T16:36:11,286 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for 5b965f308a04d32d777100e414299207 2024-11-13T16:36:11,286 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1060): Cleaning up temporary data for 22a119933ccc4c3eb35919c2f4b07619 2024-11-13T16:36:11,288 DEBUG 
[RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1093): writing seq id for 22a119933ccc4c3eb35919c2f4b07619 2024-11-13T16:36:11,288 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for 5b965f308a04d32d777100e414299207 2024-11-13T16:36:11,290 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/22a119933ccc4c3eb35919c2f4b07619/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:36:11,290 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/5b965f308a04d32d777100e414299207/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:36:11,290 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1114): Opened 22a119933ccc4c3eb35919c2f4b07619; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74206003, jitterRate=0.10575561225414276}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:36:11,290 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1114): Opened 5b965f308a04d32d777100e414299207; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69620609, jitterRate=0.03742791712284088}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:36:11,290 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 22a119933ccc4c3eb35919c2f4b07619 2024-11-13T16:36:11,290 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5b965f308a04d32d777100e414299207 2024-11-13T16:36:11,291 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1006): Region open journal for 22a119933ccc4c3eb35919c2f4b07619: Running coprocessor pre-open hook at 1731515771279Writing region info on filesystem at 1731515771279Initializing all the Stores at 1731515771280 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515771280Cleaning up temporary data from old regions at 1731515771286 (+6 ms)Running coprocessor post-open hooks at 1731515771290 (+4 ms)Region opened successfully at 1731515771291 (+1 ms) 2024-11-13T16:36:11,291 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal for 5b965f308a04d32d777100e414299207: Running coprocessor 
pre-open hook at 1731515771280Writing region info on filesystem at 1731515771280Initializing all the Stores at 1731515771280Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515771280Cleaning up temporary data from old regions at 1731515771286 (+6 ms)Running coprocessor post-open hooks at 1731515771290 (+4 ms)Region opened successfully at 1731515771291 (+1 ms) 2024-11-13T16:36:11,292 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619., pid=134, masterSystemTime=1731515771274 2024-11-13T16:36:11,292 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207., pid=135, masterSystemTime=1731515771275 2024-11-13T16:36:11,295 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619. 2024-11-13T16:36:11,295 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619. 2024-11-13T16:36:11,297 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=132 updating hbase:meta row=22a119933ccc4c3eb35919c2f4b07619, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:36:11,298 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207. 2024-11-13T16:36:11,298 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207. 
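
The assignment updates above show both regions of testtb-testExportExpiredSnapshot reaching OPEN, each with an openSeqNum and a regionLocation written to hbase:meta. Purely as an illustration of what that state means to a client (this code is not part of the test output; the class name and connection setup are assumed, using the standard HBase client API):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class PrintRegionLocations {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(
                 TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
          // Each HRegionLocation pairs a region with its hosting server, i.e. the
          // regionLocation=... values that RegionStateStore wrote to hbase:meta above.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }
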
2024-11-13T16:36:11,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=132, state=RUNNABLE, hasLock=false; OpenRegionProcedure 22a119933ccc4c3eb35919c2f4b07619, server=f8f85b0bb1d8,38437,1731515528789 because future has completed 2024-11-13T16:36:11,301 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=5b965f308a04d32d777100e414299207, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:36:11,304 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5b965f308a04d32d777100e414299207, server=f8f85b0bb1d8,38523,1731515529292 because future has completed 2024-11-13T16:36:11,305 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=134, resume processing ppid=132 2024-11-13T16:36:11,306 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=132, state=SUCCESS, hasLock=false; OpenRegionProcedure 22a119933ccc4c3eb35919c2f4b07619, server=f8f85b0bb1d8,38437,1731515528789 in 181 msec 2024-11-13T16:36:11,308 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=133 2024-11-13T16:36:11,308 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, ppid=131, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=22a119933ccc4c3eb35919c2f4b07619, ASSIGN in 339 msec 2024-11-13T16:36:11,308 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure 5b965f308a04d32d777100e414299207, server=f8f85b0bb1d8,38523,1731515529292 in 182 msec 2024-11-13T16:36:11,310 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=133, resume processing ppid=131 2024-11-13T16:36:11,310 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=131, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=5b965f308a04d32d777100e414299207, ASSIGN in 341 msec 2024-11-13T16:36:11,311 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T16:36:11,311 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515771311"}]},"ts":"1731515771311"} 2024-11-13T16:36:11,313 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-13T16:36:11,314 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T16:36:11,314 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-11-13T16:36:11,321 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38523 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], 
kv [jenkins: RWXCA] 2024-11-13T16:36:11,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:11,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:11,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:11,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:11,526 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:11,527 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:11,527 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:11,535 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:11,535 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 591 msec 2024-11-13T16:36:11,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-13T16:36:11,571 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-13T16:36:11,571 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T16:36:11,575 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-11-13T16:36:11,575 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619. 
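
At this point CreateTableProcedure pid=131 has finished: the table was created with a single split key ("1"), giving the two regions found above, and with one column family 'cf' whose descriptor (logged in the region-open journal) includes IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1' and BLOOMFILTER => 'ROW'. A rough client-side equivalent of that create request, assuming the standard HBase Admin API (the class name and connection setup are illustrative, not taken from the test):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportExpiredSnapshot");
          // Column family 'cf' mirroring the descriptor in the region-open journal above.
          ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)               // IS_MOB => 'true'
              .setMobThreshold(0L)               // MOB_THRESHOLD => '0'
              .setMaxVersions(1)                 // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
              .build();
          TableDescriptor td = TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cf).build();
          // One split key ("1") produces the two regions seen above: [,1) and [1,).
          admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }
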
2024-11-13T16:36:11,575 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T16:36:11,578 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T16:36:11,587 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T16:36:11,600 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T16:36:11,605 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-13T16:36:11,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731515771605 (current time:1731515771605). 2024-11-13T16:36:11,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T16:36:11,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-13T16:36:11,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T16:36:11,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@331de648, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:11,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:36:11,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:36:11,607 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:36:11,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:36:11,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:36:11,608 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26074e6f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, 
fallbackAllowed=true, bind address=null 2024-11-13T16:36:11,608 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:36:11,608 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:36:11,608 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:11,609 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55636, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:36:11,610 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e43e19e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:11,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:36:11,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:36:11,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:36:11,615 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50504, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:36:11,617 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809. 
2024-11-13T16:36:11,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:36:11,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:11,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:11,618 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:36:11,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@de8c8a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:11,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:36:11,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:36:11,622 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:36:11,622 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:36:11,622 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:36:11,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c003c52, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:11,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:36:11,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:36:11,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:11,625 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55650, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:36:11,626 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e743660, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:11,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:36:11,628 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:36:11,628 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:36:11,635 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50518, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:36:11,638 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2] 2024-11-13T16:36:11,638 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:36:11,639 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45748, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:36:11,641 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809. 
2024-11-13T16:36:11,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor282.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:36:11,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:11,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:11,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-13T16:36:11,642 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:36:11,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
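
The call stack above shows why the master briefly opened and closed an extra connection: snapshot validation runs writeAclToSnapshotDescription, which reads the table's permissions from hbase:acl (PermissionStorage.getTablePermissions) as the login user before attaching them to the snapshot description, and the ACL it found is the "jenkins: RWXCA" entry logged just above. For comparison only, the same permissions can be listed from a client with AccessControlClient; this is a rough sketch that assumes the AccessController coprocessor is enabled, as it is in this test cluster:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class ListTablePermissions {
      public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          // The String argument is a table-name regex; an exact name matches only that table.
          for (UserPermission p :
              AccessControlClient.getUserPermissions(conn, "testtb-testExportExpiredSnapshot")) {
            System.out.println(p); // expected to include the jenkins RWXCA grant seen in the log
          }
        }
      }
    }
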
2024-11-13T16:36:11,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=136, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-13T16:36:11,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 136 2024-11-13T16:36:11,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-13T16:36:11,652 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T16:36:11,656 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T16:36:11,663 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T16:36:11,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742176_1352 (size=170) 2024-11-13T16:36:11,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742176_1352 (size=170) 2024-11-13T16:36:11,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742176_1352 (size=170) 2024-11-13T16:36:11,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-13T16:36:11,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-13T16:36:12,110 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T16:36:12,111 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 22a119933ccc4c3eb35919c2f4b07619}, {pid=138, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5b965f308a04d32d777100e414299207}] 2024-11-13T16:36:12,114 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=138, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5b965f308a04d32d777100e414299207 2024-11-13T16:36:12,115 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=137, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 22a119933ccc4c3eb35919c2f4b07619 2024-11-13T16:36:12,272 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38437 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=137 2024-11-13T16:36:12,272 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619. 2024-11-13T16:36:12,272 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38523 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-11-13T16:36:12,272 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.HRegion(2603): Flush status journal for 22a119933ccc4c3eb35919c2f4b07619: 2024-11-13T16:36:12,272 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-13T16:36:12,272 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207. 2024-11-13T16:36:12,273 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-13T16:36:12,273 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:36:12,273 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for 5b965f308a04d32d777100e414299207: 2024-11-13T16:36:12,273 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T16:36:12,273 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-13T16:36:12,273 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-13T16:36:12,273 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:36:12,273 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T16:36:12,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-13T16:36:12,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742177_1353 (size=71) 2024-11-13T16:36:12,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742177_1353 (size=71) 2024-11-13T16:36:12,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619. 2024-11-13T16:36:12,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-13T16:36:12,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=137 2024-11-13T16:36:12,285 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 22a119933ccc4c3eb35919c2f4b07619 2024-11-13T16:36:12,286 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=137, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 22a119933ccc4c3eb35919c2f4b07619 2024-11-13T16:36:12,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742177_1353 (size=71) 2024-11-13T16:36:12,290 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, ppid=136, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 22a119933ccc4c3eb35919c2f4b07619 in 176 msec 2024-11-13T16:36:12,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742178_1354 (size=71) 2024-11-13T16:36:12,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742178_1354 (size=71) 2024-11-13T16:36:12,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742178_1354 (size=71) 2024-11-13T16:36:12,310 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207. 
2024-11-13T16:36:12,310 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-11-13T16:36:12,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-11-13T16:36:12,314 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 5b965f308a04d32d777100e414299207 2024-11-13T16:36:12,322 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5b965f308a04d32d777100e414299207 2024-11-13T16:36:12,340 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=138, resume processing ppid=136 2024-11-13T16:36:12,340 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=136, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5b965f308a04d32d777100e414299207 in 215 msec 2024-11-13T16:36:12,340 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T16:36:12,342 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T16:36:12,344 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-13T16:36:12,345 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T16:36:12,345 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:36:12,345 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-13T16:36:12,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742179_1355 (size=63) 2024-11-13T16:36:12,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742179_1355 (size=63) 2024-11-13T16:36:12,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742179_1355 (size=63) 2024-11-13T16:36:12,387 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T16:36:12,387 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-11-13T16:36:12,388 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-11-13T16:36:12,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742180_1356 (size=653) 2024-11-13T16:36:12,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742180_1356 (size=653) 2024-11-13T16:36:12,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742180_1356 (size=653) 2024-11-13T16:36:12,404 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T16:36:12,410 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T16:36:12,410 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-11-13T16:36:12,412 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T16:36:12,412 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 136 2024-11-13T16:36:12,414 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 769 msec 2024-11-13T16:36:12,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-13T16:36:12,789 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-13T16:36:12,801 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38437 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T16:36:12,806 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38523 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T16:36:12,810 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T16:36:12,815 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-11-13T16:36:12,815 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619. 
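
Both the empty snapshot above and the one requested next are FLUSH-type snapshots taken through the master's snapshot RPC ({ ss=... table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }). A minimal client-side call that produces such a request, assuming the standard blocking Admin API (class name and connection setup are illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // FLUSH snapshot of an enabled table; the call blocks until the master-side
          // SnapshotProcedure completes, which is what the repeated
          // "Checking to see if procedure is done pid=..." lines correspond to.
          admin.snapshot("emptySnaptb0-testExportExpiredSnapshot",
              TableName.valueOf("testtb-testExportExpiredSnapshot"),
              SnapshotType.FLUSH);
        }
      }
    }
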
2024-11-13T16:36:12,816 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T16:36:12,819 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T16:36:12,843 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T16:36:12,861 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T16:36:12,871 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-13T16:36:12,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731515772871 (current time:1731515772871). 2024-11-13T16:36:12,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T16:36:12,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-13T16:36:12,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T16:36:12,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f98e714, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:12,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:36:12,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:36:12,877 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:36:12,878 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:36:12,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:36:12,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@596559c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-11-13T16:36:12,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:36:12,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:36:12,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:12,882 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55656, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:36:12,883 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a627178, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:12,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:36:12,885 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:36:12,885 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:36:12,887 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50520, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:36:12,889 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809. 
2024-11-13T16:36:12,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:36:12,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:12,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:12,891 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:36:12,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d439320, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:12,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:36:12,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:36:12,896 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:36:12,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:36:12,900 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:36:12,900 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38ed25c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:12,900 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:36:12,901 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:36:12,902 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55668, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:36:12,903 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:12,903 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16330129, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:12,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:36:12,905 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:36:12,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:36:12,909 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50522, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:36:12,912 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2] 2024-11-13T16:36:12,912 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:36:12,915 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45762, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:36:12,919 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809. 
2024-11-13T16:36:12,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor282.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:36:12,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:12,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:12,920 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:36:12,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-13T16:36:12,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
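The "attempting snapshot" entry is the master-side view of a client snapshot request arriving at MasterRpcServices.snapshot. A minimal client-side sketch of issuing the same kind of FLUSH snapshot is shown below; it is illustrative rather than the actual test code, and the class name and configuration source are assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // A FLUSH-type snapshot flushes each region's memstore and then references the
          // resulting hfiles; this call blocks until the master-side SnapshotProcedure
          // (pid=139 in the entries around here) reports completion.
          admin.snapshot("snaptb0-testExportExpiredSnapshot",
              TableName.valueOf("testtb-testExportExpiredSnapshot"),
              SnapshotType.FLUSH);
        }
      }
    }

Because the call is synchronous, the client keeps polling for completion; the repeated "Checking to see if procedure is done pid=139" entries below are the master answering those polls.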
2024-11-13T16:36:12,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=139, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-13T16:36:12,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 139 2024-11-13T16:36:12,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-13T16:36:12,927 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T16:36:12,937 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T16:36:12,955 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T16:36:13,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-13T16:36:13,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742181_1357 (size=165) 2024-11-13T16:36:13,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742181_1357 (size=165) 2024-11-13T16:36:13,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742181_1357 (size=165) 2024-11-13T16:36:13,049 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T16:36:13,049 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=140, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 22a119933ccc4c3eb35919c2f4b07619}, {pid=141, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5b965f308a04d32d777100e414299207}] 2024-11-13T16:36:13,056 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5b965f308a04d32d777100e414299207 2024-11-13T16:36:13,056 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took 
xlock for pid=140, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 22a119933ccc4c3eb35919c2f4b07619 2024-11-13T16:36:13,118 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-11-13T16:36:13,217 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38523 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-11-13T16:36:13,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207. 2024-11-13T16:36:13,218 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing 5b965f308a04d32d777100e414299207 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-13T16:36:13,218 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38437 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=140 2024-11-13T16:36:13,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619. 2024-11-13T16:36:13,223 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegion(2902): Flushing 22a119933ccc4c3eb35919c2f4b07619 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-13T16:36:13,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-13T16:36:13,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411136f370823da1545e08ef1da503f1ed2a0_5b965f308a04d32d777100e414299207 is 71, key is 11e692fb4d2cebc833429203e8195f49/cf:q/1731515772806/Put/seqid=0 2024-11-13T16:36:13,288 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241113c3fb264396c34aee93b4d21b967230da_22a119933ccc4c3eb35919c2f4b07619 is 71, key is 01b21c5b3dc93fb699ce3d4932c5adfb/cf:q/1731515772801/Put/seqid=0 2024-11-13T16:36:13,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742182_1358 (size=8101) 2024-11-13T16:36:13,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742182_1358 (size=8101) 2024-11-13T16:36:13,374 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
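Since the snapshot is of type FLUSH, each SnapshotRegionProcedure first flushes its region's single MOB-enabled column family before taking file references, which is what the "Flushing ... 1/1 column families" entries show. The equivalent client-triggered flush, offered only as an illustration (class name and configuration source are assumptions), would be:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Flushes all regions of the table; for a MOB-enabled family this writes both a
          // regular store file and a MOB file, as the DefaultMobStoreFlusher entries show.
          admin.flush(TableName.valueOf("testtb-testExportExpiredSnapshot"));
        }
      }
    }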
2024-11-13T16:36:13,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742182_1358 (size=8101) 2024-11-13T16:36:13,384 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411136f370823da1545e08ef1da503f1ed2a0_5b965f308a04d32d777100e414299207 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b202411136f370823da1545e08ef1da503f1ed2a0_5b965f308a04d32d777100e414299207 2024-11-13T16:36:13,386 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/5b965f308a04d32d777100e414299207/.tmp/cf/1b90b94f220d4530b2cdbd3e145ba36c, store: [table=testtb-testExportExpiredSnapshot family=cf region=5b965f308a04d32d777100e414299207] 2024-11-13T16:36:13,387 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/5b965f308a04d32d777100e414299207/.tmp/cf/1b90b94f220d4530b2cdbd3e145ba36c is 209, key is 1e2c297072ab16769e4cafacc885a7c68/cf:q/1731515772806/Put/seqid=0 2024-11-13T16:36:13,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742183_1359 (size=5172) 2024-11-13T16:36:13,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742183_1359 (size=5172) 2024-11-13T16:36:13,401 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:36:13,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742183_1359 (size=5172) 2024-11-13T16:36:13,420 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241113c3fb264396c34aee93b4d21b967230da_22a119933ccc4c3eb35919c2f4b07619 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241113c3fb264396c34aee93b4d21b967230da_22a119933ccc4c3eb35919c2f4b07619 2024-11-13T16:36:13,422 WARN [regionserver/f8f85b0bb1d8:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 1, running: 1 2024-11-13T16:36:13,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=140}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/22a119933ccc4c3eb35919c2f4b07619/.tmp/cf/6abf8651ede14f0c84238ddc990b7115, store: [table=testtb-testExportExpiredSnapshot family=cf region=22a119933ccc4c3eb35919c2f4b07619] 2024-11-13T16:36:13,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/22a119933ccc4c3eb35919c2f4b07619/.tmp/cf/6abf8651ede14f0c84238ddc990b7115 is 209, key is 001b8ee04e90a32213510fdd40b1be7f0/cf:q/1731515772801/Put/seqid=0 2024-11-13T16:36:13,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742185_1361 (size=6123) 2024-11-13T16:36:13,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742185_1361 (size=6123) 2024-11-13T16:36:13,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742185_1361 (size=6123) 2024-11-13T16:36:13,499 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/22a119933ccc4c3eb35919c2f4b07619/.tmp/cf/6abf8651ede14f0c84238ddc990b7115 2024-11-13T16:36:13,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742184_1360 (size=14792) 2024-11-13T16:36:13,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742184_1360 (size=14792) 2024-11-13T16:36:13,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742184_1360 (size=14792) 2024-11-13T16:36:13,510 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/5b965f308a04d32d777100e414299207/.tmp/cf/1b90b94f220d4530b2cdbd3e145ba36c 2024-11-13T16:36:13,517 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/22a119933ccc4c3eb35919c2f4b07619/.tmp/cf/6abf8651ede14f0c84238ddc990b7115 as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/22a119933ccc4c3eb35919c2f4b07619/cf/6abf8651ede14f0c84238ddc990b7115 2024-11-13T16:36:13,537 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/22a119933ccc4c3eb35919c2f4b07619/cf/6abf8651ede14f0c84238ddc990b7115, entries=4, sequenceid=6, filesize=6.0 K 2024-11-13T16:36:13,539 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 22a119933ccc4c3eb35919c2f4b07619 in 316ms, sequenceid=6, compaction requested=false 2024-11-13T16:36:13,539 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegion(2603): Flush status journal for 22a119933ccc4c3eb35919c2f4b07619: 2024-11-13T16:36:13,539 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619. for snaptb0-testExportExpiredSnapshot completed. 2024-11-13T16:36:13,539 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-13T16:36:13,539 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:36:13,539 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/22a119933ccc4c3eb35919c2f4b07619/cf/6abf8651ede14f0c84238ddc990b7115] hfiles 2024-11-13T16:36:13,540 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/22a119933ccc4c3eb35919c2f4b07619/cf/6abf8651ede14f0c84238ddc990b7115 for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-13T16:36:13,541 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/5b965f308a04d32d777100e414299207/.tmp/cf/1b90b94f220d4530b2cdbd3e145ba36c as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/5b965f308a04d32d777100e414299207/cf/1b90b94f220d4530b2cdbd3e145ba36c 2024-11-13T16:36:13,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-13T16:36:13,564 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/5b965f308a04d32d777100e414299207/cf/1b90b94f220d4530b2cdbd3e145ba36c, entries=46, sequenceid=6, filesize=14.4 K 2024-11-13T16:36:13,571 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 5b965f308a04d32d777100e414299207 in 354ms, sequenceid=6, compaction requested=false 2024-11-13T16:36:13,571 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2603): Flush status journal for 5b965f308a04d32d777100e414299207: 2024-11-13T16:36:13,571 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207. for snaptb0-testExportExpiredSnapshot completed. 2024-11-13T16:36:13,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-13T16:36:13,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:36:13,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/5b965f308a04d32d777100e414299207/cf/1b90b94f220d4530b2cdbd3e145ba36c] hfiles 2024-11-13T16:36:13,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/5b965f308a04d32d777100e414299207/cf/1b90b94f220d4530b2cdbd3e145ba36c for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-13T16:36:13,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742186_1362 (size=110) 2024-11-13T16:36:13,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742186_1362 (size=110) 2024-11-13T16:36:13,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742186_1362 (size=110) 2024-11-13T16:36:13,624 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619. 
2024-11-13T16:36:13,624 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=140 2024-11-13T16:36:13,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=140 2024-11-13T16:36:13,625 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 22a119933ccc4c3eb35919c2f4b07619 2024-11-13T16:36:13,626 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=140, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 22a119933ccc4c3eb35919c2f4b07619 2024-11-13T16:36:13,643 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, ppid=139, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 22a119933ccc4c3eb35919c2f4b07619 in 582 msec 2024-11-13T16:36:13,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742187_1363 (size=110) 2024-11-13T16:36:13,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742187_1363 (size=110) 2024-11-13T16:36:13,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742187_1363 (size=110) 2024-11-13T16:36:13,678 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207. 
2024-11-13T16:36:13,679 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-13T16:36:13,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-11-13T16:36:13,687 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 5b965f308a04d32d777100e414299207 2024-11-13T16:36:13,687 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5b965f308a04d32d777100e414299207 2024-11-13T16:36:13,692 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=141, resume processing ppid=139 2024-11-13T16:36:13,692 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=139, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5b965f308a04d32d777100e414299207 in 639 msec 2024-11-13T16:36:13,692 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T16:36:13,694 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T16:36:13,695 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
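At this point the per-region snapshot manifests are written and the MOB region is being handled; once the procedure reaches SNAPSHOT_COMPLETE_SNAPSHOT a few entries below, the snapshot is moved out of .tmp and becomes visible under .hbase-snapshot. A client could then confirm it with a listing such as the hedged sketch below (class name and configuration source are assumptions):

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListSnapshots {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          List<SnapshotDescription> snapshots = admin.listSnapshots();
          for (SnapshotDescription s : snapshots) {
            // Expect "snaptb0-testExportExpiredSnapshot" here once pid=139 finishes.
            System.out.println(s.getName() + " on " + s.getTableName());
          }
        }
      }
    }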
2024-11-13T16:36:13,696 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T16:36:13,696 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:36:13,697 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b202411136f370823da1545e08ef1da503f1ed2a0_5b965f308a04d32d777100e414299207, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241113c3fb264396c34aee93b4d21b967230da_22a119933ccc4c3eb35919c2f4b07619] hfiles 2024-11-13T16:36:13,698 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b202411136f370823da1545e08ef1da503f1ed2a0_5b965f308a04d32d777100e414299207 2024-11-13T16:36:13,698 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241113c3fb264396c34aee93b4d21b967230da_22a119933ccc4c3eb35919c2f4b07619 2024-11-13T16:36:13,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742188_1364 (size=294) 2024-11-13T16:36:13,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742188_1364 (size=294) 2024-11-13T16:36:13,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742188_1364 (size=294) 2024-11-13T16:36:13,773 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T16:36:13,773 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-11-13T16:36:13,775 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-11-13T16:36:13,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742189_1365 (size=963) 2024-11-13T16:36:13,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742189_1365 (size=963) 2024-11-13T16:36:13,851 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742189_1365 (size=963) 2024-11-13T16:36:13,885 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T16:36:13,921 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T16:36:13,922 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-11-13T16:36:13,927 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T16:36:13,927 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 139 2024-11-13T16:36:13,930 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 1.0060 sec 2024-11-13T16:36:14,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-13T16:36:14,060 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-13T16:36:14,069 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T16:36:14,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=142, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-11-13T16:36:14,081 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, 
hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T16:36:14,081 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 142 2024-11-13T16:36:14,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-13T16:36:14,087 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T16:36:14,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742190_1366 (size=436) 2024-11-13T16:36:14,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742190_1366 (size=436) 2024-11-13T16:36:14,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742190_1366 (size=436) 2024-11-13T16:36:14,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-13T16:36:14,192 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6d9281e5cd6679c218f14227797e5659, NAME => 'testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:36:14,192 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 33bde9964e83af90386f782c94c4e709, NAME => 'testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:36:14,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742192_1368 (size=61) 2024-11-13T16:36:14,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:33001 is added to blk_1073742192_1368 (size=61) 2024-11-13T16:36:14,261 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:36:14,261 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 33bde9964e83af90386f782c94c4e709, disabling compactions & flushes 2024-11-13T16:36:14,261 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709. 2024-11-13T16:36:14,261 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709. 2024-11-13T16:36:14,261 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709. after waiting 0 ms 2024-11-13T16:36:14,261 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709. 2024-11-13T16:36:14,261 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709. 2024-11-13T16:36:14,262 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 33bde9964e83af90386f782c94c4e709: Waiting for close lock at 1731515774261Disabling compacts and flushes for region at 1731515774261Disabling writes for close at 1731515774261Writing region close event to WAL at 1731515774261Closed at 1731515774261 2024-11-13T16:36:14,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742191_1367 (size=61) 2024-11-13T16:36:14,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742192_1368 (size=61) 2024-11-13T16:36:14,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742191_1367 (size=61) 2024-11-13T16:36:14,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742191_1367 (size=61) 2024-11-13T16:36:14,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-13T16:36:14,664 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:36:14,665 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing 6d9281e5cd6679c218f14227797e5659, disabling compactions & flushes 2024-11-13T16:36:14,665 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region 
testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659. 2024-11-13T16:36:14,665 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659. 2024-11-13T16:36:14,665 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659. after waiting 0 ms 2024-11-13T16:36:14,665 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659. 2024-11-13T16:36:14,665 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659. 2024-11-13T16:36:14,665 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6d9281e5cd6679c218f14227797e5659: Waiting for close lock at 1731515774665Disabling compacts and flushes for region at 1731515774665Disabling writes for close at 1731515774665Writing region close event to WAL at 1731515774665Closed at 1731515774665 2024-11-13T16:36:14,666 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T16:36:14,667 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1731515774666"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515774666"}]},"ts":"1731515774666"} 2024-11-13T16:36:14,667 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1731515774666"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515774666"}]},"ts":"1731515774666"} 2024-11-13T16:36:14,670 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
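The CreateTableProcedure entries above describe a table with a single MOB-enabled family ('cf', MOB_THRESHOLD => '0', VERSIONS => '1') pre-split at row key '1', which yields the two regions just added to hbase:meta. An equivalent client-side definition, offered only as an illustrative sketch and not as the test's own code (class name and configuration source are assumptions; the remaining attributes shown in the log are left at their defaults), might be:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testExportExpiredSnapshot"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMobEnabled(true)   // IS_MOB => 'true'
                .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every cell is stored as a MOB
                .setMaxVersions(1)     // VERSIONS => '1'
                .build())
            .build();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // A single split key at '1' produces the two regions seen above
          // (STARTKEY '' -> ENDKEY '1', and STARTKEY '1' -> ENDKEY '').
          admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }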
2024-11-13T16:36:14,671 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T16:36:14,672 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515774671"}]},"ts":"1731515774671"} 2024-11-13T16:36:14,674 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-13T16:36:14,674 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {f8f85b0bb1d8=0} racks are {/default-rack=0} 2024-11-13T16:36:14,675 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T16:36:14,676 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T16:36:14,676 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T16:36:14,676 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T16:36:14,676 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T16:36:14,676 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T16:36:14,676 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T16:36:14,676 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T16:36:14,676 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T16:36:14,676 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T16:36:14,676 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=6d9281e5cd6679c218f14227797e5659, ASSIGN}, {pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=33bde9964e83af90386f782c94c4e709, ASSIGN}] 2024-11-13T16:36:14,678 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=33bde9964e83af90386f782c94c4e709, ASSIGN 2024-11-13T16:36:14,678 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=6d9281e5cd6679c218f14227797e5659, ASSIGN 2024-11-13T16:36:14,679 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=6d9281e5cd6679c218f14227797e5659, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,38523,1731515529292; forceNewPlan=false, retain=false 2024-11-13T16:36:14,679 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=33bde9964e83af90386f782c94c4e709, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,39619,1731515529105; forceNewPlan=false, retain=false 2024-11-13T16:36:14,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-13T16:36:14,785 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0006_000001 (auth:SIMPLE) from 127.0.0.1:58614 2024-11-13T16:36:14,797 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_1/usercache/jenkins/appcache/application_1731515545832_0006/container_1731515545832_0006_01_000001/launch_container.sh] 2024-11-13T16:36:14,797 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_1/usercache/jenkins/appcache/application_1731515545832_0006/container_1731515545832_0006_01_000001/container_tokens] 2024-11-13T16:36:14,797 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_1/usercache/jenkins/appcache/application_1731515545832_0006/container_1731515545832_0006_01_000001/sysfs] 2024-11-13T16:36:14,830 INFO [f8f85b0bb1d8:45809 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
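Once the TransitRegionStateProcedures above have chosen targets (f8f85b0bb1d8,38523 and f8f85b0bb1d8,39619) and the OpenRegionProcedures run, the resulting assignments can be observed from a client via a RegionLocator. The following is an illustrative sketch only (class name and configuration source are assumptions):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ShowAssignments {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("testExportExpiredSnapshot"))) {
          // After the OPEN transitions complete this prints one line per region, e.g.
          // 6d9281e5... hosted on port 38523 and 33bde996... hosted on port 39619.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getRegionNameAsString()
                + " -> " + loc.getServerName());
          }
        }
      }
    }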
2024-11-13T16:36:14,830 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=143 updating hbase:meta row=6d9281e5cd6679c218f14227797e5659, regionState=OPENING, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:36:14,830 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=33bde9964e83af90386f782c94c4e709, regionState=OPENING, regionLocation=f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:36:14,832 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=6d9281e5cd6679c218f14227797e5659, ASSIGN because future has completed 2024-11-13T16:36:14,832 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=145, ppid=143, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6d9281e5cd6679c218f14227797e5659, server=f8f85b0bb1d8,38523,1731515529292}] 2024-11-13T16:36:14,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=33bde9964e83af90386f782c94c4e709, ASSIGN because future has completed 2024-11-13T16:36:14,833 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 33bde9964e83af90386f782c94c4e709, server=f8f85b0bb1d8,39619,1731515529105}] 2024-11-13T16:36:14,992 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659. 2024-11-13T16:36:14,993 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7752): Opening region: {ENCODED => 6d9281e5cd6679c218f14227797e5659, NAME => 'testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659.', STARTKEY => '', ENDKEY => '1'} 2024-11-13T16:36:14,993 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709. 2024-11-13T16:36:14,993 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659. service=AccessControlService 2024-11-13T16:36:14,993 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => 33bde9964e83af90386f782c94c4e709, NAME => 'testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709.', STARTKEY => '1', ENDKEY => ''} 2024-11-13T16:36:14,993 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709. 
service=AccessControlService 2024-11-13T16:36:14,993 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T16:36:14,993 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T16:36:14,993 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 33bde9964e83af90386f782c94c4e709 2024-11-13T16:36:14,993 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 6d9281e5cd6679c218f14227797e5659 2024-11-13T16:36:14,993 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:36:14,993 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:36:14,993 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7794): checking encryption for 6d9281e5cd6679c218f14227797e5659 2024-11-13T16:36:14,993 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for 33bde9964e83af90386f782c94c4e709 2024-11-13T16:36:14,994 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for 33bde9964e83af90386f782c94c4e709 2024-11-13T16:36:14,994 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7797): checking classloading for 6d9281e5cd6679c218f14227797e5659 2024-11-13T16:36:14,996 INFO [StoreOpener-33bde9964e83af90386f782c94c4e709-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 33bde9964e83af90386f782c94c4e709 2024-11-13T16:36:14,997 INFO [StoreOpener-33bde9964e83af90386f782c94c4e709-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 33bde9964e83af90386f782c94c4e709 columnFamilyName cf 2024-11-13T16:36:14,998 DEBUG [StoreOpener-33bde9964e83af90386f782c94c4e709-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:36:15,000 INFO [StoreOpener-33bde9964e83af90386f782c94c4e709-1 {}] regionserver.HStore(327): Store=33bde9964e83af90386f782c94c4e709/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:36:15,000 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for 33bde9964e83af90386f782c94c4e709 2024-11-13T16:36:15,003 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportExpiredSnapshot/33bde9964e83af90386f782c94c4e709 2024-11-13T16:36:15,003 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportExpiredSnapshot/33bde9964e83af90386f782c94c4e709 2024-11-13T16:36:15,004 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for 33bde9964e83af90386f782c94c4e709 2024-11-13T16:36:15,004 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for 33bde9964e83af90386f782c94c4e709 2024-11-13T16:36:15,007 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1093): writing seq id for 33bde9964e83af90386f782c94c4e709 2024-11-13T16:36:15,010 INFO [StoreOpener-6d9281e5cd6679c218f14227797e5659-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6d9281e5cd6679c218f14227797e5659 2024-11-13T16:36:15,010 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportExpiredSnapshot/33bde9964e83af90386f782c94c4e709/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:36:15,011 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1114): Opened 33bde9964e83af90386f782c94c4e709; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70390666, jitterRate=0.048902660608291626}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:36:15,011 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 
33bde9964e83af90386f782c94c4e709 2024-11-13T16:36:15,012 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for 33bde9964e83af90386f782c94c4e709: Running coprocessor pre-open hook at 1731515774994Writing region info on filesystem at 1731515774994Initializing all the Stores at 1731515774995 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515774995Cleaning up temporary data from old regions at 1731515775004 (+9 ms)Running coprocessor post-open hooks at 1731515775011 (+7 ms)Region opened successfully at 1731515775012 (+1 ms) 2024-11-13T16:36:15,014 INFO [StoreOpener-6d9281e5cd6679c218f14227797e5659-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6d9281e5cd6679c218f14227797e5659 columnFamilyName cf 2024-11-13T16:36:15,015 DEBUG [StoreOpener-6d9281e5cd6679c218f14227797e5659-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:36:15,015 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709., pid=146, masterSystemTime=1731515774986 2024-11-13T16:36:15,016 INFO [StoreOpener-6d9281e5cd6679c218f14227797e5659-1 {}] regionserver.HStore(327): Store=6d9281e5cd6679c218f14227797e5659/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:36:15,016 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1038): replaying wal for 6d9281e5cd6679c218f14227797e5659 2024-11-13T16:36:15,017 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportExpiredSnapshot/6d9281e5cd6679c218f14227797e5659 2024-11-13T16:36:15,018 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportExpiredSnapshot/6d9281e5cd6679c218f14227797e5659 2024-11-13T16:36:15,019 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 
{event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1048): stopping wal replay for 6d9281e5cd6679c218f14227797e5659 2024-11-13T16:36:15,019 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1060): Cleaning up temporary data for 6d9281e5cd6679c218f14227797e5659 2024-11-13T16:36:15,020 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1093): writing seq id for 6d9281e5cd6679c218f14227797e5659 2024-11-13T16:36:15,022 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportExpiredSnapshot/6d9281e5cd6679c218f14227797e5659/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:36:15,025 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1114): Opened 6d9281e5cd6679c218f14227797e5659; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65351037, jitterRate=-0.026193663477897644}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:36:15,025 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6d9281e5cd6679c218f14227797e5659 2024-11-13T16:36:15,025 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1006): Region open journal for 6d9281e5cd6679c218f14227797e5659: Running coprocessor pre-open hook at 1731515774994Writing region info on filesystem at 1731515774994Initializing all the Stores at 1731515774998 (+4 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515774998Cleaning up temporary data from old regions at 1731515775019 (+21 ms)Running coprocessor post-open hooks at 1731515775025 (+6 ms)Region opened successfully at 1731515775025 2024-11-13T16:36:15,036 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709. 2024-11-13T16:36:15,036 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709. 
2024-11-13T16:36:15,037 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=33bde9964e83af90386f782c94c4e709, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:36:15,038 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659., pid=145, masterSystemTime=1731515774985 2024-11-13T16:36:15,041 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 33bde9964e83af90386f782c94c4e709, server=f8f85b0bb1d8,39619,1731515529105 because future has completed 2024-11-13T16:36:15,043 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=143 updating hbase:meta row=6d9281e5cd6679c218f14227797e5659, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:36:15,046 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659. 2024-11-13T16:36:15,047 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659. 2024-11-13T16:36:15,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=143, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6d9281e5cd6679c218f14227797e5659, server=f8f85b0bb1d8,38523,1731515529292 because future has completed 2024-11-13T16:36:15,054 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=144 2024-11-13T16:36:15,055 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure 33bde9964e83af90386f782c94c4e709, server=f8f85b0bb1d8,39619,1731515529105 in 209 msec 2024-11-13T16:36:15,058 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=145, resume processing ppid=143 2024-11-13T16:36:15,058 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=143, state=SUCCESS, hasLock=false; OpenRegionProcedure 6d9281e5cd6679c218f14227797e5659, server=f8f85b0bb1d8,38523,1731515529292 in 222 msec 2024-11-13T16:36:15,060 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=142, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=33bde9964e83af90386f782c94c4e709, ASSIGN in 379 msec 2024-11-13T16:36:15,062 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=143, resume processing ppid=142 2024-11-13T16:36:15,062 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, ppid=142, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=6d9281e5cd6679c218f14227797e5659, ASSIGN in 382 msec 2024-11-13T16:36:15,064 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 
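
The region-open journals above print the table's only column family as {NAME => 'cf', IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1', ...} and show two regions split at row key '1'. A table of that shape can be created through the standard HBase Java client roughly as in the sketch below; only the table name, family name, and split key are taken from this log, the rest is an illustrative assumption rather than the test's actual setup code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testExportExpiredSnapshot");
          // One MOB-enabled family 'cf' with threshold 0 and a single version,
          // mirroring the column family descriptor in the region-open journal.
          TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(table)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)
                  .setMobThreshold(0L)
                  .setMaxVersions(1)
                  .build());
          // Split key '1' produces the two regions seen above: [,1) and [1,).
          admin.createTable(builder.build(), new byte[][] { Bytes.toBytes("1") });
        }
      }
    }
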
2024-11-13T16:36:15,064 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515775064"}]},"ts":"1731515775064"} 2024-11-13T16:36:15,067 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-13T16:36:15,069 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T16:36:15,069 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-11-13T16:36:15,075 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38523 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-13T16:36:15,092 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:15,092 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:15,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:15,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:15,118 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:15,118 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:15,118 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:15,118 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:15,119 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:15,119 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): 
Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:15,119 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:15,119 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:15,120 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 1.0480 sec 2024-11-13T16:36:15,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-13T16:36:15,221 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-11-13T16:36:15,221 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T16:36:15,224 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-11-13T16:36:15,225 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659. 2024-11-13T16:36:15,225 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T16:36:15,229 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T16:36:15,243 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T16:36:15,259 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T16:36:15,273 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38523 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T16:36:15,279 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39619 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709. with WAL disabled. Data may be lost in the event of a crash. 
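
The two "writing data to region ... with WAL disabled" lines above are the warning HRegion emits when a client sends mutations with durability SKIP_WAL. A minimal, hypothetical client-side counterpart (row key and value invented; the family:qualifier cf:q matches the cells seen later during the flush) would look roughly like this:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testExportExpiredSnapshot"))) {
          Put put = new Put(Bytes.toBytes("row-1"));   // hypothetical row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // Skipping the WAL is what triggers the "Data may be lost in the
          // event of a crash" warning logged by the region server above.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }
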
2024-11-13T16:36:15,281 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T16:36:15,286 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-11-13T16:36:15,286 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659. 2024-11-13T16:36:15,286 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T16:36:15,289 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T16:36:15,301 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-13T16:36:15,312 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-13T16:36:15,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-13T16:36:15,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T16:36:15,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c1b0ad4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:15,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:36:15,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:36:15,314 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:36:15,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:36:15,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:36:15,315 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58ef0bde, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:15,315 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new 
servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:36:15,315 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:36:15,315 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:15,316 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40188, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:36:15,317 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ee8705e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:15,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:36:15,319 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:36:15,319 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:36:15,320 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49690, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:36:15,322 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809. 
2024-11-13T16:36:15,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:36:15,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:15,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:15,322 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:36:15,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ed3ae80, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:15,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:36:15,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:36:15,324 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:36:15,324 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:36:15,324 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:36:15,325 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31cc6a0c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:15,325 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:36:15,325 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:36:15,325 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:15,329 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40206, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:36:15,330 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@264ac482, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:15,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:36:15,333 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:36:15,333 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:36:15,337 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49698, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:36:15,340 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2] 2024-11-13T16:36:15,341 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:36:15,342 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48638, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:36:15,343 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809. 
2024-11-13T16:36:15,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor282.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:36:15,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:15,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:15,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-13T16:36:15,344 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:36:15,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
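
At this point the master has validated the request for a FLUSH snapshot named snapshot-testExportExpiredSnapshot with ttl=10 and is about to store SnapshotProcedure pid=147. From the client side such a snapshot is typically requested through Admin.snapshot; the sketch below is an assumption-laden illustration rather than the test's code, and it omits the 10-second TTL, which would have to be supplied through the snapshot's properties rather than this basic overload.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeSnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // For an enabled table this takes a flush snapshot, matching the
          // "type=FLUSH" in the master's snapshot request above. The call
          // blocks until the snapshot procedure completes.
          admin.snapshot("snapshot-testExportExpiredSnapshot",
              TableName.valueOf("testExportExpiredSnapshot"));
        }
      }
    }
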
2024-11-13T16:36:15,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=147, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-13T16:36:15,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 147 2024-11-13T16:36:15,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-13T16:36:15,347 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-11-13T16:36:15,348 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T16:36:15,351 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T16:36:15,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742193_1369 (size=152) 2024-11-13T16:36:15,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742193_1369 (size=152) 2024-11-13T16:36:15,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742193_1369 (size=152) 2024-11-13T16:36:15,376 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T16:36:15,376 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6d9281e5cd6679c218f14227797e5659}, {pid=149, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 33bde9964e83af90386f782c94c4e709}] 2024-11-13T16:36:15,377 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 33bde9964e83af90386f782c94c4e709 2024-11-13T16:36:15,377 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6d9281e5cd6679c218f14227797e5659 2024-11-13T16:36:15,457 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-13T16:36:15,529 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38523 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=148 2024-11-13T16:36:15,529 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=149 2024-11-13T16:36:15,530 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659. 2024-11-13T16:36:15,530 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegion(2902): Flushing 6d9281e5cd6679c218f14227797e5659 1/1 column families, dataSize=65 B heapSize=400 B 2024-11-13T16:36:15,530 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709. 2024-11-13T16:36:15,531 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegion(2902): Flushing 33bde9964e83af90386f782c94c4e709 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-11-13T16:36:15,559 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024111381ac59b4efdf4310bc2657a850a1de51_33bde9964e83af90386f782c94c4e709 is 71, key is 15300c3726a2a339c58a12d074bdaaa5/cf:q/1731515775279/Put/seqid=0 2024-11-13T16:36:15,562 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411136f7704abd8024beb8ff8236c01c719a8_6d9281e5cd6679c218f14227797e5659 is 69, key is 056eed51318c6e93ae7b5448366babaf1/cf:q/1731515775273/Put/seqid=0 2024-11-13T16:36:15,586 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T16:36:15,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742195_1371 (size=4964) 2024-11-13T16:36:15,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742195_1371 (size=4964) 2024-11-13T16:36:15,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742195_1371 (size=4964) 2024-11-13T16:36:15,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742194_1370 (size=8311) 2024-11-13T16:36:15,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742194_1370 (size=8311) 2024-11-13T16:36:15,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742194_1370 (size=8311) 2024-11-13T16:36:15,598 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:36:15,617 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411136f7704abd8024beb8ff8236c01c719a8_6d9281e5cd6679c218f14227797e5659 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e202411136f7704abd8024beb8ff8236c01c719a8_6d9281e5cd6679c218f14227797e5659 2024-11-13T16:36:15,618 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportExpiredSnapshot/6d9281e5cd6679c218f14227797e5659/.tmp/cf/8c5ef04fe1fe43359538e427188653c1, store: [table=testExportExpiredSnapshot family=cf region=6d9281e5cd6679c218f14227797e5659] 2024-11-13T16:36:15,665 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportExpiredSnapshot/6d9281e5cd6679c218f14227797e5659/.tmp/cf/8c5ef04fe1fe43359538e427188653c1 is 202, key is 056eed51318c6e93ae7b5448366babaf1/cf:q/1731515775273/Put/seqid=0 2024-11-13T16:36:15,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-13T16:36:15,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742196_1372 (size=5490) 2024-11-13T16:36:15,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742196_1372 (size=5490) 2024-11-13T16:36:15,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742196_1372 (size=5490) 2024-11-13T16:36:15,780 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=65, hasBloomFilter=true, into tmp file hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportExpiredSnapshot/6d9281e5cd6679c218f14227797e5659/.tmp/cf/8c5ef04fe1fe43359538e427188653c1 2024-11-13T16:36:15,839 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportExpiredSnapshot/6d9281e5cd6679c218f14227797e5659/.tmp/cf/8c5ef04fe1fe43359538e427188653c1 as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportExpiredSnapshot/6d9281e5cd6679c218f14227797e5659/cf/8c5ef04fe1fe43359538e427188653c1 2024-11-13T16:36:15,872 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportExpiredSnapshot/6d9281e5cd6679c218f14227797e5659/cf/8c5ef04fe1fe43359538e427188653c1, entries=1, sequenceid=5, filesize=5.4 K 2024-11-13T16:36:15,875 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegion(3140): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for 6d9281e5cd6679c218f14227797e5659 in 345ms, sequenceid=5, compaction requested=false 2024-11-13T16:36:15,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-11-13T16:36:15,876 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegion(2603): Flush status journal for 6d9281e5cd6679c218f14227797e5659: 2024-11-13T16:36:15,876 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659. for snapshot-testExportExpiredSnapshot completed. 2024-11-13T16:36:15,876 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-13T16:36:15,877 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:36:15,877 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportExpiredSnapshot/6d9281e5cd6679c218f14227797e5659/cf/8c5ef04fe1fe43359538e427188653c1] hfiles 2024-11-13T16:36:15,877 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportExpiredSnapshot/6d9281e5cd6679c218f14227797e5659/cf/8c5ef04fe1fe43359538e427188653c1 for snapshot=snapshot-testExportExpiredSnapshot 2024-11-13T16:36:15,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742197_1373 (size=103) 2024-11-13T16:36:15,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742197_1373 (size=103) 2024-11-13T16:36:15,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742197_1373 (size=103) 2024-11-13T16:36:15,943 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659. 
2024-11-13T16:36:15,943 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=148 2024-11-13T16:36:15,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=148 2024-11-13T16:36:15,947 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 6d9281e5cd6679c218f14227797e5659 2024-11-13T16:36:15,948 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6d9281e5cd6679c218f14227797e5659 2024-11-13T16:36:15,953 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, ppid=147, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6d9281e5cd6679c218f14227797e5659 in 575 msec 2024-11-13T16:36:15,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-13T16:36:16,007 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:36:16,035 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024111381ac59b4efdf4310bc2657a850a1de51_33bde9964e83af90386f782c94c4e709 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b2024111381ac59b4efdf4310bc2657a850a1de51_33bde9964e83af90386f782c94c4e709 2024-11-13T16:36:16,043 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportExpiredSnapshot/33bde9964e83af90386f782c94c4e709/.tmp/cf/4550248c23a94f7e986a4b926ffb1882, store: [table=testExportExpiredSnapshot family=cf region=33bde9964e83af90386f782c94c4e709] 2024-11-13T16:36:16,043 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportExpiredSnapshot/33bde9964e83af90386f782c94c4e709/.tmp/cf/4550248c23a94f7e986a4b926ffb1882 is 202, key is 16e09d1b6238654a2d6d28e7bf6ef5e7f/cf:q/1731515775279/Put/seqid=0 2024-11-13T16:36:16,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742198_1374 (size=15057) 2024-11-13T16:36:16,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742198_1374 (size=15057) 2024-11-13T16:36:16,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to 
blk_1073742198_1374 (size=15057) 2024-11-13T16:36:16,095 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=3.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportExpiredSnapshot/33bde9964e83af90386f782c94c4e709/.tmp/cf/4550248c23a94f7e986a4b926ffb1882 2024-11-13T16:36:16,139 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportExpiredSnapshot/33bde9964e83af90386f782c94c4e709/.tmp/cf/4550248c23a94f7e986a4b926ffb1882 as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportExpiredSnapshot/33bde9964e83af90386f782c94c4e709/cf/4550248c23a94f7e986a4b926ffb1882 2024-11-13T16:36:16,154 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportExpiredSnapshot/33bde9964e83af90386f782c94c4e709/cf/4550248c23a94f7e986a4b926ffb1882, entries=49, sequenceid=5, filesize=14.7 K 2024-11-13T16:36:16,159 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegion(3140): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for 33bde9964e83af90386f782c94c4e709 in 628ms, sequenceid=5, compaction requested=false 2024-11-13T16:36:16,159 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegion(2603): Flush status journal for 33bde9964e83af90386f782c94c4e709: 2024-11-13T16:36:16,159 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709. for snapshot-testExportExpiredSnapshot completed. 2024-11-13T16:36:16,159 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-13T16:36:16,159 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:36:16,159 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportExpiredSnapshot/33bde9964e83af90386f782c94c4e709/cf/4550248c23a94f7e986a4b926ffb1882] hfiles 2024-11-13T16:36:16,159 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportExpiredSnapshot/33bde9964e83af90386f782c94c4e709/cf/4550248c23a94f7e986a4b926ffb1882 for snapshot=snapshot-testExportExpiredSnapshot 2024-11-13T16:36:16,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742199_1375 (size=103) 2024-11-13T16:36:16,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742199_1375 (size=103) 2024-11-13T16:36:16,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742199_1375 (size=103) 2024-11-13T16:36:16,211 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709. 
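
Both SnapshotRegionProcedure subprocedures (pid=148 and pid=149) have now flushed their regions and written reference files into the snapshot working directory. Once the parent SnapshotProcedure (pid=147) reaches SUCCESS, which happens beyond this excerpt, a client can confirm the snapshot is registered; the sketch below is a hedged illustration, not verification code taken from the test.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListSnapshotsSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // The snapshot taken above should show up here as
          // "snapshot-testExportExpiredSnapshot" once pid=147 finishes.
          for (SnapshotDescription sd : admin.listSnapshots()) {
            System.out.println(sd.getName());
          }
        }
      }
    }
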
2024-11-13T16:36:16,212 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-11-13T16:36:16,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=149 2024-11-13T16:36:16,212 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 33bde9964e83af90386f782c94c4e709 2024-11-13T16:36:16,213 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 33bde9964e83af90386f782c94c4e709 2024-11-13T16:36:16,218 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=149, resume processing ppid=147 2024-11-13T16:36:16,218 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=147, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 33bde9964e83af90386f782c94c4e709 in 838 msec 2024-11-13T16:36:16,218 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T16:36:16,219 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T16:36:16,224 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-13T16:36:16,224 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T16:36:16,224 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:36:16,226 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b2024111381ac59b4efdf4310bc2657a850a1de51_33bde9964e83af90386f782c94c4e709, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e202411136f7704abd8024beb8ff8236c01c719a8_6d9281e5cd6679c218f14227797e5659] hfiles 2024-11-13T16:36:16,226 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b2024111381ac59b4efdf4310bc2657a850a1de51_33bde9964e83af90386f782c94c4e709 2024-11-13T16:36:16,226 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e202411136f7704abd8024beb8ff8236c01c719a8_6d9281e5cd6679c218f14227797e5659 2024-11-13T16:36:16,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742200_1376 (size=287) 2024-11-13T16:36:16,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742200_1376 (size=287) 2024-11-13T16:36:16,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742200_1376 (size=287) 2024-11-13T16:36:16,242 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T16:36:16,242 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-11-13T16:36:16,243 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-13T16:36:16,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742201_1377 (size=935) 2024-11-13T16:36:16,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742201_1377 (size=935) 2024-11-13T16:36:16,271 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742201_1377 (size=935) 2024-11-13T16:36:16,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-13T16:36:16,699 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T16:36:16,715 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T16:36:16,715 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-11-13T16:36:16,717 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T16:36:16,717 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 147 2024-11-13T16:36:16,719 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 1.3730 sec 2024-11-13T16:36:17,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-13T16:36:17,519 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-11-13T16:36:17,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-11-13T16:36:17,713 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-11-13T16:36:17,714 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-13T16:36:17,714 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a 
single HBase RegionServer 2024-11-13T16:36:17,715 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-13T16:36:17,715 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-13T16:36:20,279 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 22a119933ccc4c3eb35919c2f4b07619 changed from -1.0 to 0.0, refreshing cache 2024-11-13T16:36:20,279 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 6d9281e5cd6679c218f14227797e5659 changed from -1.0 to 0.0, refreshing cache 2024-11-13T16:36:20,279 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 5b965f308a04d32d777100e414299207 changed from -1.0 to 0.0, refreshing cache 2024-11-13T16:36:20,279 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 33bde9964e83af90386f782c94c4e709 changed from -1.0 to 0.0, refreshing cache 2024-11-13T16:36:20,496 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T16:36:27,535 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515787535 2024-11-13T16:36:27,536 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:37215, tgtDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515787535, rawTgtDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515787535, srcFsUri=hdfs://localhost:37215, srcDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:36:27,583 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:37215, inputRoot=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:36:27,583 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-315877880_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515787535, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515787535/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-13T16:36:27,586 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-13T16:36:27,588 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:951) ~[classes/:?] 
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1096) ~[classes/:?]
    at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:522) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:314) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
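[Editor's note] The SnapshotTTLExpiredException above is the expected outcome of this test: the snapshot was taken with ttl=10 (seconds) and completed around 16:36:16, but the export was only attempted at 16:36:27, so ExportSnapshot's verification step rejects it. A minimal, self-contained sketch of the TTL arithmetic this check implies (illustrative only, not HBase's actual implementation; class and method names here are hypothetical):

import java.time.Instant;

/** Illustrative sketch of a snapshot-TTL expiry check, mirroring the values in this log. */
public class SnapshotTtlCheckSketch {

    /** Expired once creationTime + ttlSeconds lies in the past; ttl <= 0 is treated as "never expires". */
    static boolean isExpired(long creationTimeMillis, long ttlSeconds, long nowMillis) {
        if (ttlSeconds <= 0) {
            return false; // no TTL configured
        }
        return nowMillis > creationTimeMillis + ttlSeconds * 1000L;
    }

    public static void main(String[] args) {
        // Values mirroring the log (time zone ignored for the sketch):
        // snapshot finished ~16:36:16, export attempted ~16:36:27, ttl = 10 s.
        long created = Instant.parse("2024-11-13T16:36:16Z").toEpochMilli();
        long attempt = Instant.parse("2024-11-13T16:36:27Z").toEpochMilli();
        System.out.println(isExpired(created, 10, attempt)); // true -> export is rejected
    }
}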
2024-11-13T16:36:27,600 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportExpiredSnapshot 2024-11-13T16:36:27,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=150, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-13T16:36:27,619 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515787619"}]},"ts":"1731515787619"} 2024-11-13T16:36:27,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-13T16:36:27,623 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-11-13T16:36:27,623 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-11-13T16:36:27,625 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-11-13T16:36:27,635 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=22a119933ccc4c3eb35919c2f4b07619, UNASSIGN}, {pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=5b965f308a04d32d777100e414299207, UNASSIGN}] 2024-11-13T16:36:27,640 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=22a119933ccc4c3eb35919c2f4b07619, UNASSIGN 2024-11-13T16:36:27,640 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=5b965f308a04d32d777100e414299207, UNASSIGN 2024-11-13T16:36:27,649 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=5b965f308a04d32d777100e414299207, regionState=CLOSING, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:36:27,649 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=152 updating hbase:meta row=22a119933ccc4c3eb35919c2f4b07619, regionState=CLOSING, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:36:27,657 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=5b965f308a04d32d777100e414299207, UNASSIGN because future has completed 2024-11-13T16:36:27,657 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T16:36:27,657 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5b965f308a04d32d777100e414299207, server=f8f85b0bb1d8,38523,1731515529292}] 2024-11-13T16:36:27,658 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=22a119933ccc4c3eb35919c2f4b07619, UNASSIGN because future has completed 2024-11-13T16:36:27,662 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T16:36:27,663 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=155, ppid=152, state=RUNNABLE, hasLock=false; CloseRegionProcedure 22a119933ccc4c3eb35919c2f4b07619, server=f8f85b0bb1d8,38437,1731515528789}] 2024-11-13T16:36:27,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-13T16:36:27,816 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(122): Close 5b965f308a04d32d777100e414299207 2024-11-13T16:36:27,817 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T16:36:27,817 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1722): Closing 5b965f308a04d32d777100e414299207, disabling compactions & flushes 2024-11-13T16:36:27,817 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207. 2024-11-13T16:36:27,817 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207. 2024-11-13T16:36:27,817 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207. after waiting 0 ms 2024-11-13T16:36:27,817 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207. 
2024-11-13T16:36:27,840 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(122): Close 22a119933ccc4c3eb35919c2f4b07619 2024-11-13T16:36:27,840 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T16:36:27,840 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1722): Closing 22a119933ccc4c3eb35919c2f4b07619, disabling compactions & flushes 2024-11-13T16:36:27,841 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619. 2024-11-13T16:36:27,841 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619. 2024-11-13T16:36:27,841 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619. after waiting 0 ms 2024-11-13T16:36:27,841 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619. 2024-11-13T16:36:27,854 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/5b965f308a04d32d777100e414299207/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T16:36:27,858 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:36:27,859 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207. 
2024-11-13T16:36:27,859 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1676): Region close journal for 5b965f308a04d32d777100e414299207: Waiting for close lock at 1731515787817Running coprocessor pre-close hooks at 1731515787817Disabling compacts and flushes for region at 1731515787817Disabling writes for close at 1731515787817Writing region close event to WAL at 1731515787821 (+4 ms)Running coprocessor post-close hooks at 1731515787858 (+37 ms)Closed at 1731515787858 2024-11-13T16:36:27,865 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(157): Closed 5b965f308a04d32d777100e414299207 2024-11-13T16:36:27,870 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/22a119933ccc4c3eb35919c2f4b07619/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T16:36:27,871 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:36:27,871 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619. 2024-11-13T16:36:27,872 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1676): Region close journal for 22a119933ccc4c3eb35919c2f4b07619: Waiting for close lock at 1731515787840Running coprocessor pre-close hooks at 1731515787840Disabling compacts and flushes for region at 1731515787840Disabling writes for close at 1731515787841 (+1 ms)Writing region close event to WAL at 1731515787863 (+22 ms)Running coprocessor post-close hooks at 1731515787871 (+8 ms)Closed at 1731515787871 2024-11-13T16:36:27,874 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=5b965f308a04d32d777100e414299207, regionState=CLOSED 2024-11-13T16:36:27,876 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(157): Closed 22a119933ccc4c3eb35919c2f4b07619 2024-11-13T16:36:27,878 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=152 updating hbase:meta row=22a119933ccc4c3eb35919c2f4b07619, regionState=CLOSED 2024-11-13T16:36:27,888 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=153, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5b965f308a04d32d777100e414299207, server=f8f85b0bb1d8,38523,1731515529292 because future has completed 2024-11-13T16:36:27,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=155, ppid=152, state=RUNNABLE, hasLock=false; CloseRegionProcedure 22a119933ccc4c3eb35919c2f4b07619, server=f8f85b0bb1d8,38437,1731515528789 because future has completed 2024-11-13T16:36:27,916 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=153 2024-11-13T16:36:27,916 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=153, state=SUCCESS, hasLock=false; 
CloseRegionProcedure 5b965f308a04d32d777100e414299207, server=f8f85b0bb1d8,38523,1731515529292 in 239 msec 2024-11-13T16:36:27,919 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=155, resume processing ppid=152 2024-11-13T16:36:27,919 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, ppid=152, state=SUCCESS, hasLock=false; CloseRegionProcedure 22a119933ccc4c3eb35919c2f4b07619, server=f8f85b0bb1d8,38437,1731515528789 in 243 msec 2024-11-13T16:36:27,929 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=152, resume processing ppid=151 2024-11-13T16:36:27,929 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=151, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=22a119933ccc4c3eb35919c2f4b07619, UNASSIGN in 285 msec 2024-11-13T16:36:27,931 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=151, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=5b965f308a04d32d777100e414299207, UNASSIGN in 281 msec 2024-11-13T16:36:27,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-13T16:36:27,941 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=150 2024-11-13T16:36:27,941 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=150, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 306 msec 2024-11-13T16:36:27,947 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515787947"}]},"ts":"1731515787947"} 2024-11-13T16:36:27,955 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-11-13T16:36:27,956 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-11-13T16:36:27,972 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 364 msec 2024-11-13T16:36:28,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-13T16:36:28,252 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-13T16:36:28,256 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportExpiredSnapshot 2024-11-13T16:36:28,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=156, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-13T16:36:28,261 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=156, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-13T16:36:28,264 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-11-13T16:36:28,272 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=156, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-13T16:36:28,273 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38523 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-11-13T16:36:28,288 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/22a119933ccc4c3eb35919c2f4b07619 2024-11-13T16:36:28,291 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/5b965f308a04d32d777100e414299207 2024-11-13T16:36:28,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-13T16:36:28,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-13T16:36:28,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-13T16:36:28,302 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-13T16:36:28,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-13T16:36:28,307 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-13T16:36:28,309 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-13T16:36:28,309 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-13T16:36:28,312 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/22a119933ccc4c3eb35919c2f4b07619/cf, FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/22a119933ccc4c3eb35919c2f4b07619/recovered.edits] 2024-11-13T16:36:28,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-13T16:36:28,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-13T16:36:28,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:28,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:28,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-13T16:36:28,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:28,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-13T16:36:28,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:28,319 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:28,319 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/5b965f308a04d32d777100e414299207/cf, FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/5b965f308a04d32d777100e414299207/recovered.edits] 2024-11-13T16:36:28,319 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:28,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=156 2024-11-13T16:36:28,329 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 
2024-11-13T16:36:28,331 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:28,333 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/22a119933ccc4c3eb35919c2f4b07619/cf/6abf8651ede14f0c84238ddc990b7115 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportExpiredSnapshot/22a119933ccc4c3eb35919c2f4b07619/cf/6abf8651ede14f0c84238ddc990b7115 2024-11-13T16:36:28,337 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/5b965f308a04d32d777100e414299207/cf/1b90b94f220d4530b2cdbd3e145ba36c to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportExpiredSnapshot/5b965f308a04d32d777100e414299207/cf/1b90b94f220d4530b2cdbd3e145ba36c 2024-11-13T16:36:28,337 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/22a119933ccc4c3eb35919c2f4b07619/recovered.edits/9.seqid to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportExpiredSnapshot/22a119933ccc4c3eb35919c2f4b07619/recovered.edits/9.seqid 2024-11-13T16:36:28,338 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/22a119933ccc4c3eb35919c2f4b07619 2024-11-13T16:36:28,341 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/5b965f308a04d32d777100e414299207/recovered.edits/9.seqid to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportExpiredSnapshot/5b965f308a04d32d777100e414299207/recovered.edits/9.seqid 2024-11-13T16:36:28,341 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportExpiredSnapshot/5b965f308a04d32d777100e414299207 2024-11-13T16:36:28,342 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-11-13T16:36:28,342 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-11-13T16:36:28,344 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf] 2024-11-13T16:36:28,347 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b202411136f370823da1545e08ef1da503f1ed2a0_5b965f308a04d32d777100e414299207 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b202411136f370823da1545e08ef1da503f1ed2a0_5b965f308a04d32d777100e414299207 2024-11-13T16:36:28,349 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241113c3fb264396c34aee93b4d21b967230da_22a119933ccc4c3eb35919c2f4b07619 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241113c3fb264396c34aee93b4d21b967230da_22a119933ccc4c3eb35919c2f4b07619 2024-11-13T16:36:28,349 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-11-13T16:36:28,355 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=156, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-13T16:36:28,361 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-11-13T16:36:28,366 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-11-13T16:36:28,369 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=156, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-13T16:36:28,369 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states. 
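[Editor's note] The DisableTableProcedure (pid=150) and DeleteTableProcedure (pid=156) entries around this point, together with the snapshot deletions logged shortly after, correspond to ordinary client-side Admin calls. A hedged sketch of that cleanup sequence using the standard HBase Admin API (the test's actual helper code is not visible in this log, so this is an approximation):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

/** Sketch of the table/snapshot cleanup the surrounding log entries record. */
public class CleanupSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
            admin.disableTable(table);  // DisableTableProcedure (pid=150)
            admin.deleteTable(table);   // DeleteTableProcedure (pid=156)
            // Snapshot deletions, as logged a few entries further on:
            admin.deleteSnapshot("emptySnaptb0-testExportExpiredSnapshot");
            admin.deleteSnapshot("snapshot-testExportExpiredSnapshot");
            admin.deleteSnapshot("snaptb0-testExportExpiredSnapshot");
        }
    }
}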
2024-11-13T16:36:28,370 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731515788369"}]},"ts":"9223372036854775807"} 2024-11-13T16:36:28,370 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731515788369"}]},"ts":"9223372036854775807"} 2024-11-13T16:36:28,376 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-13T16:36:28,376 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 22a119933ccc4c3eb35919c2f4b07619, NAME => 'testtb-testExportExpiredSnapshot,,1731515770936.22a119933ccc4c3eb35919c2f4b07619.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 5b965f308a04d32d777100e414299207, NAME => 'testtb-testExportExpiredSnapshot,1,1731515770936.5b965f308a04d32d777100e414299207.', STARTKEY => '1', ENDKEY => ''}] 2024-11-13T16:36:28,376 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted. 2024-11-13T16:36:28,376 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731515788376"}]},"ts":"9223372036854775807"} 2024-11-13T16:36:28,384 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-11-13T16:36:28,395 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=156, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-13T16:36:28,411 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 139 msec 2024-11-13T16:36:28,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=156 2024-11-13T16:36:28,433 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportExpiredSnapshot 2024-11-13T16:36:28,433 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-13T16:36:28,476 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-13T16:36:28,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-11-13T16:36:28,506 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snapshot-testExportExpiredSnapshot" type: DISABLED 2024-11-13T16:36:28,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-11-13T16:36:28,511 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] 
master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-13T16:36:28,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-11-13T16:36:28,558 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=794 (was 799), OpenFileDescriptor=791 (was 804), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1350 (was 1385), ProcessCount=11 (was 17), AvailableMemoryMB=4670 (was 2711) - AvailableMemoryMB LEAK? - 2024-11-13T16:36:28,558 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=794 is superior to 500 2024-11-13T16:36:28,607 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=794, OpenFileDescriptor=791, MaxFileDescriptor=1048576, SystemLoadAverage=1350, ProcessCount=11, AvailableMemoryMB=4666 2024-11-13T16:36:28,607 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=794 is superior to 500 2024-11-13T16:36:28,612 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T16:36:28,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-13T16:36:28,624 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T16:36:28,625 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 157 2024-11-13T16:36:28,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-13T16:36:28,629 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T16:36:28,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742202_1378 (size=448) 2024-11-13T16:36:28,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742202_1378 (size=448) 2024-11-13T16:36:28,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to 
blk_1073742202_1378 (size=448) 2024-11-13T16:36:28,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-13T16:36:28,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-13T16:36:29,063 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 147e2f1c20699f605099c511acd2060d, NAME => 'testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:36:29,066 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => dcc0cc079b883b52e728c119e130ffd0, NAME => 'testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:36:29,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742204_1380 (size=73) 2024-11-13T16:36:29,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742204_1380 (size=73) 2024-11-13T16:36:29,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742204_1380 (size=73) 2024-11-13T16:36:29,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742203_1379 (size=73) 2024-11-13T16:36:29,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742203_1379 (size=73) 2024-11-13T16:36:29,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742203_1379 (size=73) 2024-11-13T16:36:29,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-13T16:36:29,530 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): 
Instantiated testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:36:29,531 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing dcc0cc079b883b52e728c119e130ffd0, disabling compactions & flushes 2024-11-13T16:36:29,531 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0. 2024-11-13T16:36:29,531 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0. 2024-11-13T16:36:29,531 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0. after waiting 0 ms 2024-11-13T16:36:29,531 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0. 2024-11-13T16:36:29,531 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0. 2024-11-13T16:36:29,531 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for dcc0cc079b883b52e728c119e130ffd0: Waiting for close lock at 1731515789531Disabling compacts and flushes for region at 1731515789531Disabling writes for close at 1731515789531Writing region close event to WAL at 1731515789531Closed at 1731515789531 2024-11-13T16:36:29,559 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:36:29,560 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 147e2f1c20699f605099c511acd2060d, disabling compactions & flushes 2024-11-13T16:36:29,560 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d. 2024-11-13T16:36:29,560 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d. 2024-11-13T16:36:29,560 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d. after waiting 0 ms 2024-11-13T16:36:29,560 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d. 
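[Editor's note] The CreateTableProcedure above is building 'testtb-testEmptyExportFileSystemState' with two regions (split at key '1') and a single MOB-enabled family 'cf' (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1'). A hedged sketch of how an equivalent descriptor could be assembled with the public client API; the attribute values and split key come from the log, but this is illustrative code, not the test's own:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

/** Sketch: a MOB-enabled table comparable to 'testtb-testEmptyExportFileSystemState'. */
public class CreateMobTableSketch {
    static void createTable(Admin admin) throws java.io.IOException {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)              // IS_MOB => 'true'
            .setMobThreshold(0L)              // MOB_THRESHOLD => '0': every cell stored as a MOB
            .setMaxVersions(1)                // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testEmptyExportFileSystemState"))
            .setColumnFamily(cf)
            .build();
        // One split key '1' yields the two regions ('', '1') and ('1', '') seen in the log.
        admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
    }
}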
2024-11-13T16:36:29,560 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d. 2024-11-13T16:36:29,560 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 147e2f1c20699f605099c511acd2060d: Waiting for close lock at 1731515789560Disabling compacts and flushes for region at 1731515789560Disabling writes for close at 1731515789560Writing region close event to WAL at 1731515789560Closed at 1731515789560 2024-11-13T16:36:29,562 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T16:36:29,563 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1731515789562"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515789562"}]},"ts":"1731515789562"} 2024-11-13T16:36:29,563 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1731515789562"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515789562"}]},"ts":"1731515789562"} 2024-11-13T16:36:29,568 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-13T16:36:29,570 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T16:36:29,570 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515789570"}]},"ts":"1731515789570"} 2024-11-13T16:36:29,575 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-11-13T16:36:29,575 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {f8f85b0bb1d8=0} racks are {/default-rack=0} 2024-11-13T16:36:29,577 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T16:36:29,577 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T16:36:29,577 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T16:36:29,577 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T16:36:29,578 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T16:36:29,578 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T16:36:29,578 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T16:36:29,578 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T16:36:29,578 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T16:36:29,578 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): 
Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T16:36:29,578 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=147e2f1c20699f605099c511acd2060d, ASSIGN}, {pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dcc0cc079b883b52e728c119e130ffd0, ASSIGN}] 2024-11-13T16:36:29,588 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dcc0cc079b883b52e728c119e130ffd0, ASSIGN 2024-11-13T16:36:29,589 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=147e2f1c20699f605099c511acd2060d, ASSIGN 2024-11-13T16:36:29,595 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dcc0cc079b883b52e728c119e130ffd0, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,38523,1731515529292; forceNewPlan=false, retain=false 2024-11-13T16:36:29,599 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=147e2f1c20699f605099c511acd2060d, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,38437,1731515528789; forceNewPlan=false, retain=false 2024-11-13T16:36:29,749 INFO [f8f85b0bb1d8:45809 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-13T16:36:29,749 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=dcc0cc079b883b52e728c119e130ffd0, regionState=OPENING, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:36:29,749 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=158 updating hbase:meta row=147e2f1c20699f605099c511acd2060d, regionState=OPENING, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:36:29,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dcc0cc079b883b52e728c119e130ffd0, ASSIGN because future has completed 2024-11-13T16:36:29,752 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE, hasLock=false; OpenRegionProcedure dcc0cc079b883b52e728c119e130ffd0, server=f8f85b0bb1d8,38523,1731515529292}] 2024-11-13T16:36:29,753 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=147e2f1c20699f605099c511acd2060d, ASSIGN because future has completed 2024-11-13T16:36:29,753 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=161, ppid=158, state=RUNNABLE, hasLock=false; OpenRegionProcedure 147e2f1c20699f605099c511acd2060d, server=f8f85b0bb1d8,38437,1731515528789}] 2024-11-13T16:36:29,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-13T16:36:29,907 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0. 2024-11-13T16:36:29,907 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d. 2024-11-13T16:36:29,907 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7752): Opening region: {ENCODED => 147e2f1c20699f605099c511acd2060d, NAME => 'testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d.', STARTKEY => '', ENDKEY => '1'} 2024-11-13T16:36:29,907 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7752): Opening region: {ENCODED => dcc0cc079b883b52e728c119e130ffd0, NAME => 'testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0.', STARTKEY => '1', ENDKEY => ''} 2024-11-13T16:36:29,907 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d. 
service=AccessControlService 2024-11-13T16:36:29,907 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0. service=AccessControlService 2024-11-13T16:36:29,907 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T16:36:29,907 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-13T16:36:29,908 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 147e2f1c20699f605099c511acd2060d 2024-11-13T16:36:29,908 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState dcc0cc079b883b52e728c119e130ffd0 2024-11-13T16:36:29,908 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:36:29,908 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:36:29,908 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7794): checking encryption for dcc0cc079b883b52e728c119e130ffd0 2024-11-13T16:36:29,908 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7794): checking encryption for 147e2f1c20699f605099c511acd2060d 2024-11-13T16:36:29,908 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7797): checking classloading for dcc0cc079b883b52e728c119e130ffd0 2024-11-13T16:36:29,908 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7797): checking classloading for 147e2f1c20699f605099c511acd2060d 2024-11-13T16:36:29,909 INFO [StoreOpener-dcc0cc079b883b52e728c119e130ffd0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region dcc0cc079b883b52e728c119e130ffd0 2024-11-13T16:36:29,909 INFO [StoreOpener-147e2f1c20699f605099c511acd2060d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 147e2f1c20699f605099c511acd2060d 2024-11-13T16:36:29,910 INFO [StoreOpener-dcc0cc079b883b52e728c119e130ffd0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dcc0cc079b883b52e728c119e130ffd0 columnFamilyName cf 2024-11-13T16:36:29,910 INFO [StoreOpener-147e2f1c20699f605099c511acd2060d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 147e2f1c20699f605099c511acd2060d columnFamilyName cf 2024-11-13T16:36:29,911 DEBUG [StoreOpener-147e2f1c20699f605099c511acd2060d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:36:29,912 INFO [StoreOpener-147e2f1c20699f605099c511acd2060d-1 {}] regionserver.HStore(327): Store=147e2f1c20699f605099c511acd2060d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:36:29,912 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1038): replaying wal for 147e2f1c20699f605099c511acd2060d 2024-11-13T16:36:29,913 DEBUG [StoreOpener-dcc0cc079b883b52e728c119e130ffd0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:36:29,913 INFO [StoreOpener-dcc0cc079b883b52e728c119e130ffd0-1 {}] regionserver.HStore(327): Store=dcc0cc079b883b52e728c119e130ffd0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:36:29,913 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/147e2f1c20699f605099c511acd2060d 2024-11-13T16:36:29,914 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1038): replaying wal for dcc0cc079b883b52e728c119e130ffd0 2024-11-13T16:36:29,916 DEBUG 
[RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/147e2f1c20699f605099c511acd2060d 2024-11-13T16:36:29,917 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/dcc0cc079b883b52e728c119e130ffd0 2024-11-13T16:36:29,918 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/dcc0cc079b883b52e728c119e130ffd0 2024-11-13T16:36:29,918 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1048): stopping wal replay for dcc0cc079b883b52e728c119e130ffd0 2024-11-13T16:36:29,918 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1060): Cleaning up temporary data for dcc0cc079b883b52e728c119e130ffd0 2024-11-13T16:36:29,919 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1048): stopping wal replay for 147e2f1c20699f605099c511acd2060d 2024-11-13T16:36:29,919 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1060): Cleaning up temporary data for 147e2f1c20699f605099c511acd2060d 2024-11-13T16:36:29,922 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1093): writing seq id for dcc0cc079b883b52e728c119e130ffd0 2024-11-13T16:36:29,922 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1093): writing seq id for 147e2f1c20699f605099c511acd2060d 2024-11-13T16:36:29,925 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/147e2f1c20699f605099c511acd2060d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:36:29,926 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1114): Opened 147e2f1c20699f605099c511acd2060d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71991045, jitterRate=0.07275016605854034}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:36:29,926 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 147e2f1c20699f605099c511acd2060d 2024-11-13T16:36:29,929 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1006): Region open journal for 147e2f1c20699f605099c511acd2060d: Running coprocessor pre-open hook at 1731515789908Writing region info on filesystem at 
1731515789908Initializing all the Stores at 1731515789909 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515789909Cleaning up temporary data from old regions at 1731515789919 (+10 ms)Running coprocessor post-open hooks at 1731515789926 (+7 ms)Region opened successfully at 1731515789929 (+3 ms) 2024-11-13T16:36:29,931 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d., pid=161, masterSystemTime=1731515789904 2024-11-13T16:36:29,934 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d. 2024-11-13T16:36:29,934 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d. 2024-11-13T16:36:29,935 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/dcc0cc079b883b52e728c119e130ffd0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:36:29,936 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=158 updating hbase:meta row=147e2f1c20699f605099c511acd2060d, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:36:29,935 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1114): Opened dcc0cc079b883b52e728c119e130ffd0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74088046, jitterRate=0.10399791598320007}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:36:29,936 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1122): Running coprocessor post-open hooks for dcc0cc079b883b52e728c119e130ffd0 2024-11-13T16:36:29,936 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1006): Region open journal for dcc0cc079b883b52e728c119e130ffd0: Running coprocessor pre-open hook at 1731515789908Writing region info on filesystem at 1731515789908Initializing all the Stores at 1731515789909 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515789909Cleaning up temporary data from old regions at 1731515789918 (+9 ms)Running 
coprocessor post-open hooks at 1731515789936 (+18 ms)Region opened successfully at 1731515789936 2024-11-13T16:36:29,938 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0., pid=160, masterSystemTime=1731515789904 2024-11-13T16:36:29,939 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=161, ppid=158, state=RUNNABLE, hasLock=false; OpenRegionProcedure 147e2f1c20699f605099c511acd2060d, server=f8f85b0bb1d8,38437,1731515528789 because future has completed 2024-11-13T16:36:29,945 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0. 2024-11-13T16:36:29,945 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0. 2024-11-13T16:36:29,955 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=dcc0cc079b883b52e728c119e130ffd0, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:36:29,962 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=161, resume processing ppid=158 2024-11-13T16:36:29,962 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, ppid=158, state=SUCCESS, hasLock=false; OpenRegionProcedure 147e2f1c20699f605099c511acd2060d, server=f8f85b0bb1d8,38437,1731515528789 in 190 msec 2024-11-13T16:36:29,963 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=160, ppid=159, state=RUNNABLE, hasLock=false; OpenRegionProcedure dcc0cc079b883b52e728c119e130ffd0, server=f8f85b0bb1d8,38523,1731515529292 because future has completed 2024-11-13T16:36:29,971 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=147e2f1c20699f605099c511acd2060d, ASSIGN in 384 msec 2024-11-13T16:36:29,999 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=160, resume processing ppid=159 2024-11-13T16:36:29,999 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, ppid=159, state=SUCCESS, hasLock=false; OpenRegionProcedure dcc0cc079b883b52e728c119e130ffd0, server=f8f85b0bb1d8,38523,1731515529292 in 219 msec 2024-11-13T16:36:30,008 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=157 2024-11-13T16:36:30,008 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=157, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dcc0cc079b883b52e728c119e130ffd0, ASSIGN in 421 msec 2024-11-13T16:36:30,009 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T16:36:30,010 DEBUG [PEWorker-2 {}] 
hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515790009"}]},"ts":"1731515790009"} 2024-11-13T16:36:30,026 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-11-13T16:36:30,037 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T16:36:30,037 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-11-13T16:36:30,054 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38523 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-13T16:36:30,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:30,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:30,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:30,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:30,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-13T16:36:30,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:30,140 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:30,141 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:30,142 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:30,142 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:30,142 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:30,142 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:30,142 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:30,149 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 1.5270 sec 2024-11-13T16:36:30,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-13T16:36:30,791 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-13T16:36:30,791 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T16:36:30,800 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-11-13T16:36:30,800 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d. 
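The CREATE operation above completes with two regions for testtb-testEmptyExportFileSystemState, a single 'cf' column family (VERSIONS => '1', IS_MOB => 'true', MOB_THRESHOLD => '0') and one split at row key '1'. For context only, the sketch below shows how such a table could be requested through the public HBase Admin API; the class name, connection setup and variable names are illustrative assumptions and are not taken from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative only: builds a table shaped like the one logged above.
    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testEmptyExportFileSystemState"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)      // VERSIONS => '1' in the descriptor above
                  .setMobEnabled(true)    // IS_MOB => 'true'
                  .setMobThreshold(0)     // MOB_THRESHOLD => '0'
                  .build())
              .build();
          // One split key yields the two regions seen above: ['', '1') and ['1', '').
          byte[][] splitKeys = { Bytes.toBytes("1") };
          admin.createTable(desc, splitKeys);
        }
      }
    }
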
2024-11-13T16:36:30,800 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T16:36:30,803 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T16:36:30,814 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T16:36:30,843 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T16:36:30,860 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-13T16:36:30,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731515790860 (current time:1731515790860). 2024-11-13T16:36:30,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T16:36:30,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-13T16:36:30,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T16:36:30,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@227b020b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:30,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:36:30,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:36:30,867 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:36:30,869 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:36:30,869 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:36:30,869 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@234572e7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:30,870 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:36:30,870 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:36:30,870 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:30,873 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58342, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:36:30,875 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@604b0133, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:30,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:36:30,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:36:30,881 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:36:30,882 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48082, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:36:30,889 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809. 
2024-11-13T16:36:30,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:36:30,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:30,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:30,889 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:36:30,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5240dc1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:30,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:36:30,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:36:30,892 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:36:30,893 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:36:30,893 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:36:30,893 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e34833e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:30,893 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:36:30,894 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:36:30,894 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:30,898 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58372, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:36:30,903 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28ea0eb9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:30,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:36:30,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:36:30,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:36:30,912 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48096, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:36:30,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2] 2024-11-13T16:36:30,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:36:30,919 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37566, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:36:30,921 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809. 
2024-11-13T16:36:30,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor282.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:36:30,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:30,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:30,921 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:36:30,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-13T16:36:30,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
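The snapshot request logged above ({ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }) is the kind of call a client issues through Admin.snapshot(). A minimal, illustrative sketch under the same assumptions as the previous example (connection setup and class name are not from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    // Illustrative only: requests a FLUSH snapshot like the one named above.
    public class TakeEmptySnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // type=FLUSH as in the logged request: online regions are flushed
          // (a no-op for this empty table) before their store files are
          // referenced in the snapshot manifest.
          admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState",
              TableName.valueOf("testtb-testEmptyExportFileSystemState"),
              SnapshotType.FLUSH);
        }
      }
    }
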
2024-11-13T16:36:30,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=162, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-13T16:36:30,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 162 2024-11-13T16:36:30,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-13T16:36:30,929 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T16:36:30,935 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T16:36:30,939 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T16:36:31,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742205_1381 (size=185) 2024-11-13T16:36:31,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742205_1381 (size=185) 2024-11-13T16:36:31,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742205_1381 (size=185) 2024-11-13T16:36:31,007 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T16:36:31,007 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 147e2f1c20699f605099c511acd2060d}, {pid=164, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dcc0cc079b883b52e728c119e130ffd0}] 2024-11-13T16:36:31,011 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=163, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 147e2f1c20699f605099c511acd2060d 2024-11-13T16:36:31,018 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=162, state=RUNNABLE, hasLock=false; 
SnapshotRegionProcedure dcc0cc079b883b52e728c119e130ffd0 2024-11-13T16:36:31,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-13T16:36:31,122 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-11-13T16:36:31,164 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38437 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=163 2024-11-13T16:36:31,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d. 2024-11-13T16:36:31,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.HRegion(2603): Flush status journal for 147e2f1c20699f605099c511acd2060d: 2024-11-13T16:36:31,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-13T16:36:31,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-13T16:36:31,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:36:31,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T16:36:31,174 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38523 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=164 2024-11-13T16:36:31,174 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0. 2024-11-13T16:36:31,174 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.HRegion(2603): Flush status journal for dcc0cc079b883b52e728c119e130ffd0: 2024-11-13T16:36:31,174 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-13T16:36:31,174 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-13T16:36:31,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:36:31,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T16:36:31,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742206_1382 (size=76) 2024-11-13T16:36:31,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742206_1382 (size=76) 2024-11-13T16:36:31,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742206_1382 (size=76) 2024-11-13T16:36:31,250 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0. 2024-11-13T16:36:31,250 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-13T16:36:31,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=164 2024-11-13T16:36:31,251 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region dcc0cc079b883b52e728c119e130ffd0 2024-11-13T16:36:31,251 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dcc0cc079b883b52e728c119e130ffd0 2024-11-13T16:36:31,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-13T16:36:31,259 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, ppid=162, state=SUCCESS, hasLock=false; SnapshotRegionProcedure dcc0cc079b883b52e728c119e130ffd0 in 246 msec 2024-11-13T16:36:31,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742207_1383 (size=76) 2024-11-13T16:36:31,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742207_1383 (size=76) 2024-11-13T16:36:31,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742207_1383 (size=76) 2024-11-13T16:36:31,281 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d. 
2024-11-13T16:36:31,281 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=163 2024-11-13T16:36:31,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=163 2024-11-13T16:36:31,282 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 147e2f1c20699f605099c511acd2060d 2024-11-13T16:36:31,282 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=163, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 147e2f1c20699f605099c511acd2060d 2024-11-13T16:36:31,309 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=163, resume processing ppid=162 2024-11-13T16:36:31,310 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 147e2f1c20699f605099c511acd2060d in 284 msec 2024-11-13T16:36:31,310 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T16:36:31,312 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T16:36:31,327 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-13T16:36:31,327 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T16:36:31,327 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:36:31,327 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-13T16:36:31,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742208_1384 (size=68) 2024-11-13T16:36:31,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742208_1384 (size=68) 2024-11-13T16:36:31,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742208_1384 (size=68) 2024-11-13T16:36:31,389 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T16:36:31,389 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-11-13T16:36:31,390 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-13T16:36:31,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742209_1385 (size=673) 2024-11-13T16:36:31,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742209_1385 (size=673) 2024-11-13T16:36:31,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742209_1385 (size=673) 2024-11-13T16:36:31,497 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T16:36:31,514 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T16:36:31,527 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-13T16:36:31,529 INFO [PEWorker-2 {}] 
procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T16:36:31,529 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 162 2024-11-13T16:36:31,531 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 605 msec 2024-11-13T16:36:31,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-13T16:36:31,563 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-13T16:36:31,575 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38523 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T16:36:31,580 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38437 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T16:36:31,582 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T16:36:31,586 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-11-13T16:36:31,586 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d. 
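The records above mark the end of the first, empty FLUSH snapshot: the client-side admin reports the SNAPSHOT operation on default:testtb-testEmptyExportFileSystemState as completed, and the test then writes rows into both regions with the WAL disabled before requesting a second snapshot. For orientation, a snapshot like this is requested through the HBase client Admin API; the following is only an illustrative sketch reusing the snapshot and table names from the log, not the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure reaches SUCCESS; the repeated
      // "Checking to see if procedure is done pid=162" records above are that wait.
      admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState",
          TableName.valueOf("testtb-testEmptyExportFileSystemState"));
    }
  }
}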
2024-11-13T16:36:31,587 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T16:36:31,597 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T16:36:31,611 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T16:36:31,624 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-13T16:36:31,629 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-13T16:36:31,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731515791629 (current time:1731515791629). 2024-11-13T16:36:31,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T16:36:31,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-13T16:36:31,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T16:36:31,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@caac140, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:31,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:36:31,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:36:31,631 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:36:31,632 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:36:31,632 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:36:31,632 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72a0ed48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:31,632 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:36:31,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:36:31,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:31,634 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58400, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:36:31,639 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cbc2eb2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:31,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:36:31,644 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:36:31,644 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:36:31,645 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48102, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:36:31,647 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809. 
2024-11-13T16:36:31,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:36:31,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:31,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:31,649 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:36:31,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36ce0d9d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:31,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:36:31,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:36:31,660 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:36:31,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:36:31,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:36:31,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a42f8c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:31,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:36:31,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:36:31,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:31,663 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58422, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:36:31,667 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25a7d343, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:31,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:36:31,671 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:36:31,672 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:36:31,673 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48108, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:36:31,678 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2] 2024-11-13T16:36:31,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:36:31,683 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37582, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:36:31,687 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809. 
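These records trace a short-lived connection the master opens while validating the snapshot request: it fetches the cluster id from the connection registry, locates hbase:meta, and then resolves which region server hosts the hbase:acl row for the table. Ordinary client code can do the same row-to-region resolution through RegionLocator; a minimal sketch below, assuming the hbase:acl table and the row value seen in the log (purely illustrative, not what the master itself executes).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateRegionSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("hbase:acl"))) {
      // Resolves which region (and region server) holds this row, going through
      // hbase:meta just like the "fetched location" records above.
      HRegionLocation loc =
          locator.getRegionLocation(Bytes.toBytes("testtb-testEmptyExportFileSystemState"));
      System.out.println(loc);
    }
  }
}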
2024-11-13T16:36:31,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor282.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:36:31,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:31,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:31,687 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:36:31,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-13T16:36:31,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
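At this point the master has read the table's ACL entry (jenkins: RWXCA), found no snapshot with the requested name, and accepted the request for snaptb0-testEmptyExportFileSystemState; the client now polls until the procedure finishes, which is what the recurring "Checking to see if procedure is done pid=165" records below correspond to. The non-blocking client pattern behind that polling looks roughly like the sketch here (names are taken from the log; the sleep interval is an arbitrary example, and this is not the test's actual code).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class AsyncSnapshotSketch {
  public static void main(String[] args) throws Exception {
    SnapshotDescription snap = new SnapshotDescription(
        "snaptb0-testEmptyExportFileSystemState",
        TableName.valueOf("testtb-testEmptyExportFileSystemState"),
        SnapshotType.FLUSH);
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.snapshotAsync(snap);                 // returns once the procedure is submitted
      while (!admin.isSnapshotFinished(snap)) {  // client-side "is procedure done" polling
        Thread.sleep(200);                       // arbitrary poll interval for the sketch
      }
    }
  }
}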
2024-11-13T16:36:31,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=165, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-13T16:36:31,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 165 2024-11-13T16:36:31,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-13T16:36:31,715 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T16:36:31,723 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T16:36:31,737 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T16:36:31,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-13T16:36:31,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742210_1386 (size=180) 2024-11-13T16:36:31,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742210_1386 (size=180) 2024-11-13T16:36:31,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742210_1386 (size=180) 2024-11-13T16:36:31,838 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T16:36:31,838 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 147e2f1c20699f605099c511acd2060d}, {pid=167, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dcc0cc079b883b52e728c119e130ffd0}] 2024-11-13T16:36:31,851 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=167, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dcc0cc079b883b52e728c119e130ffd0 2024-11-13T16:36:31,852 INFO 
[PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=166, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 147e2f1c20699f605099c511acd2060d 2024-11-13T16:36:32,004 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38523 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=167 2024-11-13T16:36:32,004 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0. 2024-11-13T16:36:32,004 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38437 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=166 2024-11-13T16:36:32,005 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d. 2024-11-13T16:36:32,005 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegion(2902): Flushing dcc0cc079b883b52e728c119e130ffd0 1/1 column families, dataSize=2.80 KB heapSize=6.30 KB 2024-11-13T16:36:32,005 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegion(2902): Flushing 147e2f1c20699f605099c511acd2060d 1/1 column families, dataSize=467 B heapSize=1.23 KB 2024-11-13T16:36:32,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-13T16:36:32,077 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241113ab83bde84bb84c1991a0176da66a1399_147e2f1c20699f605099c511acd2060d is 71, key is 01eb854c41ac7a936a6eb0b11b893a67/cf:q/1731515791580/Put/seqid=0 2024-11-13T16:36:32,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411135f0ae2e3f60649c192eeeb71d3aae796_dcc0cc079b883b52e728c119e130ffd0 is 71, key is 169aeac41557e377f62e4f7422b11ab2/cf:q/1731515791575/Put/seqid=0 2024-11-13T16:36:32,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742211_1387 (size=5381) 2024-11-13T16:36:32,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742211_1387 (size=5381) 2024-11-13T16:36:32,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742211_1387 (size=5381) 2024-11-13T16:36:32,207 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:36:32,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742212_1388 (size=7892) 2024-11-13T16:36:32,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742212_1388 (size=7892) 2024-11-13T16:36:32,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742212_1388 (size=7892) 2024-11-13T16:36:32,236 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241113ab83bde84bb84c1991a0176da66a1399_147e2f1c20699f605099c511acd2060d to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241113ab83bde84bb84c1991a0176da66a1399_147e2f1c20699f605099c511acd2060d 2024-11-13T16:36:32,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:36:32,251 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/147e2f1c20699f605099c511acd2060d/.tmp/cf/cf7c85924a4947639a3b37ab6a1559fa, store: [table=testtb-testEmptyExportFileSystemState family=cf region=147e2f1c20699f605099c511acd2060d] 2024-11-13T16:36:32,252 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/147e2f1c20699f605099c511acd2060d/.tmp/cf/cf7c85924a4947639a3b37ab6a1559fa is 214, key is 09bee7f1cb3a602f93b2e550e66dcfe6a/cf:q/1731515791580/Put/seqid=0 2024-11-13T16:36:32,296 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411135f0ae2e3f60649c192eeeb71d3aae796_dcc0cc079b883b52e728c119e130ffd0 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b202411135f0ae2e3f60649c192eeeb71d3aae796_dcc0cc079b883b52e728c119e130ffd0 2024-11-13T16:36:32,306 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/dcc0cc079b883b52e728c119e130ffd0/.tmp/cf/85443c338b3842bd935eaf5c8b41329c, store: [table=testtb-testEmptyExportFileSystemState family=cf region=dcc0cc079b883b52e728c119e130ffd0] 2024-11-13T16:36:32,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/dcc0cc079b883b52e728c119e130ffd0/.tmp/cf/85443c338b3842bd935eaf5c8b41329c is 214, key is 13e8cfe6098038c21d37c8161d2133fd2/cf:q/1731515791575/Put/seqid=0 2024-11-13T16:36:32,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742213_1389 (size=6784) 2024-11-13T16:36:32,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742213_1389 (size=6784) 2024-11-13T16:36:32,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742213_1389 (size=6784) 2024-11-13T16:36:32,323 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=467, hasBloomFilter=true, into tmp file hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/147e2f1c20699f605099c511acd2060d/.tmp/cf/cf7c85924a4947639a3b37ab6a1559fa 2024-11-13T16:36:32,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-13T16:36:32,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742214_1390 (size=14399) 2024-11-13T16:36:32,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742214_1390 (size=14399) 2024-11-13T16:36:32,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742214_1390 (size=14399) 2024-11-13T16:36:32,390 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/147e2f1c20699f605099c511acd2060d/.tmp/cf/cf7c85924a4947639a3b37ab6a1559fa as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/147e2f1c20699f605099c511acd2060d/cf/cf7c85924a4947639a3b37ab6a1559fa 2024-11-13T16:36:32,397 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/dcc0cc079b883b52e728c119e130ffd0/.tmp/cf/85443c338b3842bd935eaf5c8b41329c 2024-11-13T16:36:32,418 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/147e2f1c20699f605099c511acd2060d/cf/cf7c85924a4947639a3b37ab6a1559fa, entries=7, sequenceid=6, filesize=6.6 K 2024-11-13T16:36:32,419 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/dcc0cc079b883b52e728c119e130ffd0/.tmp/cf/85443c338b3842bd935eaf5c8b41329c as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/dcc0cc079b883b52e728c119e130ffd0/cf/85443c338b3842bd935eaf5c8b41329c 2024-11-13T16:36:32,433 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegion(3140): Finished flush of dataSize ~467 B/467, heapSize ~1.22 KB/1248, currentSize=0 B/0 for 147e2f1c20699f605099c511acd2060d in 427ms, sequenceid=6, compaction requested=false 2024-11-13T16:36:32,434 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegion(2603): Flush status journal for 147e2f1c20699f605099c511acd2060d: 2024-11-13T16:36:32,434 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d. for snaptb0-testEmptyExportFileSystemState completed. 2024-11-13T16:36:32,434 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-13T16:36:32,434 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:36:32,434 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/147e2f1c20699f605099c511acd2060d/cf/cf7c85924a4947639a3b37ab6a1559fa] hfiles 2024-11-13T16:36:32,434 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/147e2f1c20699f605099c511acd2060d/cf/cf7c85924a4947639a3b37ab6a1559fa for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-13T16:36:32,441 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/dcc0cc079b883b52e728c119e130ffd0/cf/85443c338b3842bd935eaf5c8b41329c, entries=43, sequenceid=6, filesize=14.1 K 2024-11-13T16:36:32,456 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegion(3140): Finished flush of dataSize ~2.80 KB/2869, heapSize ~6.28 KB/6432, currentSize=0 B/0 for dcc0cc079b883b52e728c119e130ffd0 in 451ms, sequenceid=6, compaction requested=false 2024-11-13T16:36:32,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegion(2603): Flush status journal for dcc0cc079b883b52e728c119e130ffd0: 2024-11-13T16:36:32,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0. for snaptb0-testEmptyExportFileSystemState completed. 2024-11-13T16:36:32,457 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-13T16:36:32,457 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:36:32,457 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/dcc0cc079b883b52e728c119e130ffd0/cf/85443c338b3842bd935eaf5c8b41329c] hfiles 2024-11-13T16:36:32,457 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/dcc0cc079b883b52e728c119e130ffd0/cf/85443c338b3842bd935eaf5c8b41329c for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-13T16:36:32,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742216_1392 (size=115) 2024-11-13T16:36:32,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742216_1392 (size=115) 2024-11-13T16:36:32,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742216_1392 (size=115) 2024-11-13T16:36:32,539 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0. 
2024-11-13T16:36:32,539 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=167 2024-11-13T16:36:32,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=167 2024-11-13T16:36:32,543 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region dcc0cc079b883b52e728c119e130ffd0 2024-11-13T16:36:32,544 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=167, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dcc0cc079b883b52e728c119e130ffd0 2024-11-13T16:36:32,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742215_1391 (size=115) 2024-11-13T16:36:32,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742215_1391 (size=115) 2024-11-13T16:36:32,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742215_1391 (size=115) 2024-11-13T16:36:32,563 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=165, state=SUCCESS, hasLock=false; SnapshotRegionProcedure dcc0cc079b883b52e728c119e130ffd0 in 713 msec 2024-11-13T16:36:32,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-13T16:36:32,947 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d. 
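Unlike the empty snapshot earlier, this snapshot finds data in the memstores, so each SnapshotRegionCallable first flushes its region (the "Finished flush of dataSize ..." records), commits the temporary store files, and only then adds those HFiles to the snapshot manifest. The same memstore-to-HFile flush can be requested explicitly from client code through the Admin API; a minimal sketch, assuming the table name from the log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks every region of the table to write its memstore out to HFiles,
      // the same per-region flush the snapshot procedure performs above.
      admin.flush(TableName.valueOf("testtb-testEmptyExportFileSystemState"));
    }
  }
}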
2024-11-13T16:36:32,947 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-13T16:36:32,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=166 2024-11-13T16:36:32,948 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 147e2f1c20699f605099c511acd2060d 2024-11-13T16:36:32,948 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=166, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 147e2f1c20699f605099c511acd2060d 2024-11-13T16:36:32,952 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=166, resume processing ppid=165 2024-11-13T16:36:32,952 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=165, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 147e2f1c20699f605099c511acd2060d in 1.1110 sec 2024-11-13T16:36:32,952 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T16:36:32,955 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T16:36:32,956 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-13T16:36:32,956 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T16:36:32,956 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:36:32,958 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b202411135f0ae2e3f60649c192eeeb71d3aae796_dcc0cc079b883b52e728c119e130ffd0, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241113ab83bde84bb84c1991a0176da66a1399_147e2f1c20699f605099c511acd2060d] hfiles 2024-11-13T16:36:32,958 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b202411135f0ae2e3f60649c192eeeb71d3aae796_dcc0cc079b883b52e728c119e130ffd0 2024-11-13T16:36:32,958 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241113ab83bde84bb84c1991a0176da66a1399_147e2f1c20699f605099c511acd2060d 2024-11-13T16:36:33,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742217_1393 (size=299) 2024-11-13T16:36:33,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742217_1393 (size=299) 2024-11-13T16:36:33,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742217_1393 (size=299) 2024-11-13T16:36:33,032 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T16:36:33,032 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-11-13T16:36:33,034 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-11-13T16:36:33,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742218_1394 (size=983) 2024-11-13T16:36:33,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742218_1394 (size=983) 
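The mobdir paths above appear because the table's 'cf' family is MOB-enabled: oversized cells are flushed by DefaultMobStoreFlusher into separate MOB files, and the snapshot therefore runs a dedicated SNAPSHOT_SNAPSHOT_MOB_REGION step that references those files in the manifest. MOB is declared per column family at table-creation time; the sketch below is illustrative only, and its 100 KB threshold is an arbitrary example (the test table evidently uses a much lower one, since the ~71-byte cells above were written as MOB).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testEmptyExportFileSystemState"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)       // store large cells as MOB files under /mobdir
              .setMobThreshold(102400L)  // cells above ~100 KB go to MOB (example value)
              .build())
          .build());
    }
  }
}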
2024-11-13T16:36:33,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742218_1394 (size=983) 2024-11-13T16:36:33,409 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T16:36:33,539 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T16:36:33,599 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T16:36:33,606 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-11-13T16:36:33,620 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T16:36:33,620 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 165 2024-11-13T16:36:33,630 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 1.9250 sec 2024-11-13T16:36:33,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-13T16:36:33,893 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-13T16:36:33,893 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515793893 2024-11-13T16:36:33,893 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:37215, tgtDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515793893, rawTgtDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515793893, srcFsUri=hdfs://localhost:37215, srcDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 
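From here the test hands off to org.apache.hadoop.hbase.snapshot.ExportSnapshot: it verifies the source snapshot, copies the snapshot manifest into the target's .hbase-snapshot/.tmp directory, and then prepares a MapReduce copy job; the TableMapReduceUtil "For class ..., using jar ..." records that follow are that job resolving its dependency jars. Outside the test harness the same export is normally launched as a Hadoop Tool; a minimal sketch, assuming the snapshot name and export destination taken from the log, not the test's own invocation.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Equivalent to: hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot emptySnaptb0-testEmptyExportFileSystemState -copy-to <dest>
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
        "-copy-to",
        "hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515793893"
    });
    System.exit(rc);
  }
}

The export job copies both the manifest and the referenced HFiles (including MOB files) to the destination file system, which is why the jar-resolution records below pull in the mapreduce, client, and protocol jars.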
2024-11-13T16:36:33,946 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:37215, inputRoot=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:36:33,946 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-315877880_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515793893, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515793893/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-13T16:36:33,948 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-13T16:36:33,975 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515793893/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-13T16:36:34,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742219_1395 (size=185) 2024-11-13T16:36:34,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742219_1395 (size=185) 2024-11-13T16:36:34,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742219_1395 (size=185) 2024-11-13T16:36:34,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742220_1396 (size=673) 2024-11-13T16:36:34,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742220_1396 (size=673) 2024-11-13T16:36:34,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742220_1396 (size=673) 2024-11-13T16:36:34,027 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:36:34,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:36:34,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:36:35,314 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop-17471527656333210210.jar 2024-11-13T16:36:35,315 
DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:36:35,316 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:36:35,398 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop-5186117011755261578.jar 2024-11-13T16:36:35,399 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:36:35,399 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:36:35,400 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:36:35,400 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:36:35,401 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:36:35,401 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:36:35,402 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-13T16:36:35,402 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-13T16:36:35,402 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-13T16:36:35,403 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-13T16:36:35,403 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-13T16:36:35,403 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-13T16:36:35,404 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-13T16:36:35,404 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-13T16:36:35,404 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-13T16:36:35,405 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-13T16:36:35,405 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-13T16:36:35,406 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:36:35,406 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:36:35,406 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:36:35,407 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:36:35,407 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:36:35,407 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:36:35,407 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:36:35,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742221_1397 (size=131440) 2024-11-13T16:36:35,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742221_1397 (size=131440) 2024-11-13T16:36:35,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742221_1397 (size=131440) 2024-11-13T16:36:35,908 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
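The run of TableMapReduceUtil(972) DEBUG lines above records, for each class the export job depends on, the jar that was resolved for it. A minimal sketch of the client-side step that triggers this resolution, assuming a plain MapReduce Job; the class name, job name, and the "tmpjars" key below are illustrative assumptions, not taken from the test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "dependency-jars-sketch");
        // Resolves the jar containing each class the job needs (HConstants,
        // ClientProtos, Put, ...) and adds it to the job's staged jar list,
        // which is what produces the "For class X, using jar Y" lines above.
        TableMapReduceUtil.addDependencyJars(job);
        // Assumption: "tmpjars" is the configuration key that collects the
        // resolved jar paths before they are uploaded to the staging dir.
        System.out.println(job.getConfiguration().get("tmpjars"));
      }
    }

In the log, this resolution happens inside the ExportSnapshot job setup, just before the resolved jars are written to HDFS in the addStoredBlock lines that follow.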
2024-11-13T16:36:36,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742222_1398 (size=4188619) 2024-11-13T16:36:36,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742222_1398 (size=4188619) 2024-11-13T16:36:36,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742222_1398 (size=4188619) 2024-11-13T16:36:36,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742223_1399 (size=1323991) 2024-11-13T16:36:36,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742223_1399 (size=1323991) 2024-11-13T16:36:36,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742223_1399 (size=1323991) 2024-11-13T16:36:36,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742224_1400 (size=903731) 2024-11-13T16:36:36,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742224_1400 (size=903731) 2024-11-13T16:36:36,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742224_1400 (size=903731) 2024-11-13T16:36:36,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742225_1401 (size=8360083) 2024-11-13T16:36:36,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742225_1401 (size=8360083) 2024-11-13T16:36:36,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742225_1401 (size=8360083) 2024-11-13T16:36:36,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742226_1402 (size=1877034) 2024-11-13T16:36:36,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742226_1402 (size=1877034) 2024-11-13T16:36:36,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742226_1402 (size=1877034) 2024-11-13T16:36:36,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742227_1403 (size=77835) 2024-11-13T16:36:36,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742227_1403 (size=77835) 2024-11-13T16:36:36,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742227_1403 (size=77835) 2024-11-13T16:36:36,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742228_1404 (size=30949) 2024-11-13T16:36:36,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742228_1404 
(size=30949) 2024-11-13T16:36:36,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742228_1404 (size=30949) 2024-11-13T16:36:36,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742229_1405 (size=1597327) 2024-11-13T16:36:36,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742229_1405 (size=1597327) 2024-11-13T16:36:36,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742229_1405 (size=1597327) 2024-11-13T16:36:36,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742230_1406 (size=4695811) 2024-11-13T16:36:36,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742230_1406 (size=4695811) 2024-11-13T16:36:36,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742230_1406 (size=4695811) 2024-11-13T16:36:36,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742231_1407 (size=232957) 2024-11-13T16:36:36,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742231_1407 (size=232957) 2024-11-13T16:36:36,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742231_1407 (size=232957) 2024-11-13T16:36:36,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742232_1408 (size=127628) 2024-11-13T16:36:36,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742232_1408 (size=127628) 2024-11-13T16:36:36,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742232_1408 (size=127628) 2024-11-13T16:36:36,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742233_1409 (size=20406) 2024-11-13T16:36:36,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742233_1409 (size=20406) 2024-11-13T16:36:36,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742233_1409 (size=20406) 2024-11-13T16:36:36,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742234_1410 (size=5175431) 2024-11-13T16:36:36,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742234_1410 (size=5175431) 2024-11-13T16:36:36,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742234_1410 (size=5175431) 2024-11-13T16:36:36,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to 
blk_1073742235_1411 (size=217634) 2024-11-13T16:36:36,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742235_1411 (size=217634) 2024-11-13T16:36:36,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742235_1411 (size=217634) 2024-11-13T16:36:36,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742236_1412 (size=1832290) 2024-11-13T16:36:36,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742236_1412 (size=1832290) 2024-11-13T16:36:36,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742236_1412 (size=1832290) 2024-11-13T16:36:37,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742237_1413 (size=6424544) 2024-11-13T16:36:37,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742237_1413 (size=6424544) 2024-11-13T16:36:37,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742237_1413 (size=6424544) 2024-11-13T16:36:37,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742238_1414 (size=322274) 2024-11-13T16:36:37,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742238_1414 (size=322274) 2024-11-13T16:36:37,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742238_1414 (size=322274) 2024-11-13T16:36:37,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742239_1415 (size=503880) 2024-11-13T16:36:37,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742239_1415 (size=503880) 2024-11-13T16:36:37,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742239_1415 (size=503880) 2024-11-13T16:36:37,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742240_1416 (size=29229) 2024-11-13T16:36:37,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742240_1416 (size=29229) 2024-11-13T16:36:37,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742240_1416 (size=29229) 2024-11-13T16:36:37,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742241_1417 (size=24096) 2024-11-13T16:36:37,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742241_1417 (size=24096) 2024-11-13T16:36:37,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is 
added to blk_1073742241_1417 (size=24096) 2024-11-13T16:36:37,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742242_1418 (size=440392) 2024-11-13T16:36:37,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742242_1418 (size=440392) 2024-11-13T16:36:37,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742242_1418 (size=440392) 2024-11-13T16:36:37,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742243_1419 (size=111872) 2024-11-13T16:36:37,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742243_1419 (size=111872) 2024-11-13T16:36:37,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742243_1419 (size=111872) 2024-11-13T16:36:37,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742244_1420 (size=45609) 2024-11-13T16:36:37,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742244_1420 (size=45609) 2024-11-13T16:36:37,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742244_1420 (size=45609) 2024-11-13T16:36:37,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742245_1421 (size=136454) 2024-11-13T16:36:37,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742245_1421 (size=136454) 2024-11-13T16:36:37,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742245_1421 (size=136454) 2024-11-13T16:36:37,455 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
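Each staged jar above shows three addStoredBlock lines because the mini cluster writes it with three replicas, one per datanode (127.0.0.1:46053, :43257, :33001). The final JobResourceUploader warning notes that no job jar was set; the export still completes below, and a normal driver avoids the warning by pointing the job at its own jar. A small sketch, with the driver class name being hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class JobJarSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "job-jar-sketch");
        // Registers the jar that contains the driver/mapper classes so the
        // "No job jar file set" warning does not appear; Job#setJar(String),
        // mentioned in the warning, is the explicit-path alternative.
        job.setJarByClass(JobJarSketch.class);
      }
    }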
2024-11-13T16:36:37,461 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-11-13T16:36:37,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742246_1422 (size=7) 2024-11-13T16:36:37,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742246_1422 (size=7) 2024-11-13T16:36:37,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742246_1422 (size=7) 2024-11-13T16:36:37,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742247_1423 (size=10) 2024-11-13T16:36:37,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742247_1423 (size=10) 2024-11-13T16:36:37,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742247_1423 (size=10) 2024-11-13T16:36:37,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742248_1424 (size=303633) 2024-11-13T16:36:37,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742248_1424 (size=303633) 2024-11-13T16:36:37,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742248_1424 (size=303633) 2024-11-13T16:36:37,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-13T16:36:37,713 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-13T16:36:37,714 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-13T16:36:37,731 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T16:36:37,731 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-13T16:36:37,817 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0007_000001 (auth:SIMPLE) from 127.0.0.1:48346 2024-11-13T16:36:43,217 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T16:36:51,853 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0007_000001 (auth:SIMPLE) from 127.0.0.1:44012 2024-11-13T16:36:52,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742249_1425 (size=349259) 2024-11-13T16:36:52,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742249_1425 (size=349259) 2024-11-13T16:36:52,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742249_1425 (size=349259) 2024-11-13T16:36:53,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742250_1426 (size=8568) 2024-11-13T16:36:53,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742250_1426 (size=8568) 2024-11-13T16:36:53,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742250_1426 (size=8568) 2024-11-13T16:36:53,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742251_1427 (size=460) 2024-11-13T16:36:53,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742251_1427 (size=460) 2024-11-13T16:36:53,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742251_1427 (size=460) 2024-11-13T16:36:53,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742252_1428 (size=8568) 2024-11-13T16:36:53,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742252_1428 (size=8568) 2024-11-13T16:36:53,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742252_1428 (size=8568) 2024-11-13T16:36:53,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742253_1429 (size=349259) 2024-11-13T16:36:53,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742253_1429 (size=349259) 2024-11-13T16:36:53,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742253_1429 (size=349259) 2024-11-13T16:36:55,039 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-13T16:36:55,053 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
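The sequence logged above is the standard ExportSnapshot flow: verify the source snapshot, copy its manifest into the target's .hbase-snapshot/.tmp area, stage the dependency jars, run the MapReduce copy job, then finalize and verify the exported copy. A sketch of driving the same tool programmatically; the option names and the target URL are assumptions and may differ between HBase versions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Flag names below are assumed; check the tool's help output for the
        // exact options of the version in use. The target path is hypothetical.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "--snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
            "--copy-to", "hdfs://localhost:37215/user/jenkins/export-target"
        });
        System.exit(rc);
      }
    }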
2024-11-13T16:36:55,107 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-11-13T16:36:55,107 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-13T16:36:55,115 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-13T16:36:55,115 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-315877880_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-13T16:36:55,116 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-13T16:36:55,116 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-13T16:36:55,116 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-315877880_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515793893/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515793893/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-13T16:36:55,118 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515793893/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-13T16:36:55,118 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515793893/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-13T16:36:55,159 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testEmptyExportFileSystemState 2024-11-13T16:36:55,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=168, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-13T16:36:55,176 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515815176"}]},"ts":"1731515815176"} 2024-11-13T16:36:55,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-13T16:36:55,192 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-11-13T16:36:55,193 INFO [PEWorker-5 {}] 
procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-11-13T16:36:55,203 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=169, ppid=168, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-11-13T16:36:55,215 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=147e2f1c20699f605099c511acd2060d, UNASSIGN}, {pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dcc0cc079b883b52e728c119e130ffd0, UNASSIGN}] 2024-11-13T16:36:55,219 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dcc0cc079b883b52e728c119e130ffd0, UNASSIGN 2024-11-13T16:36:55,220 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=147e2f1c20699f605099c511acd2060d, UNASSIGN 2024-11-13T16:36:55,221 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=dcc0cc079b883b52e728c119e130ffd0, regionState=CLOSING, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:36:55,225 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dcc0cc079b883b52e728c119e130ffd0, UNASSIGN because future has completed 2024-11-13T16:36:55,225 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T16:36:55,226 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE, hasLock=false; CloseRegionProcedure dcc0cc079b883b52e728c119e130ffd0, server=f8f85b0bb1d8,38523,1731515529292}] 2024-11-13T16:36:55,234 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=147e2f1c20699f605099c511acd2060d, regionState=CLOSING, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:36:55,241 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=147e2f1c20699f605099c511acd2060d, UNASSIGN because future has completed 2024-11-13T16:36:55,244 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T16:36:55,244 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=170, state=RUNNABLE, hasLock=false; CloseRegionProcedure 147e2f1c20699f605099c511acd2060d, server=f8f85b0bb1d8,38437,1731515528789}] 2024-11-13T16:36:55,301 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-13T16:36:55,392 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(122): Close dcc0cc079b883b52e728c119e130ffd0 2024-11-13T16:36:55,393 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T16:36:55,393 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1722): Closing dcc0cc079b883b52e728c119e130ffd0, disabling compactions & flushes 2024-11-13T16:36:55,393 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0. 2024-11-13T16:36:55,393 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0. 2024-11-13T16:36:55,393 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0. after waiting 0 ms 2024-11-13T16:36:55,393 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0. 2024-11-13T16:36:55,409 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] handler.UnassignRegionHandler(122): Close 147e2f1c20699f605099c511acd2060d 2024-11-13T16:36:55,409 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T16:36:55,409 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1722): Closing 147e2f1c20699f605099c511acd2060d, disabling compactions & flushes 2024-11-13T16:36:55,409 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d. 2024-11-13T16:36:55,409 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d. 2024-11-13T16:36:55,409 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d. after waiting 0 ms 2024-11-13T16:36:55,409 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d. 
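The disable sequence above (a DisableTableProcedure stored as pid=168, per-region TransitRegionStateProcedure and CloseRegionProcedure children, and the table flipping to DISABLING in hbase:meta before the region servers close their regions) is what a client-side Admin#disableTable call drives. A minimal sketch, assuming a reachable cluster configuration; the class name is hypothetical:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn =
                 ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
          if (admin.isTableEnabled(table)) {
            // Blocks until the DisableTableProcedure and its region-close
            // children (as logged above) have finished on the master.
            admin.disableTable(table);
          }
        }
      }
    }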
2024-11-13T16:36:55,455 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/dcc0cc079b883b52e728c119e130ffd0/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T16:36:55,459 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:36:55,459 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0. 2024-11-13T16:36:55,460 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1676): Region close journal for dcc0cc079b883b52e728c119e130ffd0: Waiting for close lock at 1731515815393Running coprocessor pre-close hooks at 1731515815393Disabling compacts and flushes for region at 1731515815393Disabling writes for close at 1731515815393Writing region close event to WAL at 1731515815403 (+10 ms)Running coprocessor post-close hooks at 1731515815459 (+56 ms)Closed at 1731515815459 2024-11-13T16:36:55,463 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/147e2f1c20699f605099c511acd2060d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T16:36:55,465 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:36:55,465 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d. 
2024-11-13T16:36:55,465 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1676): Region close journal for 147e2f1c20699f605099c511acd2060d: Waiting for close lock at 1731515815409Running coprocessor pre-close hooks at 1731515815409Disabling compacts and flushes for region at 1731515815409Disabling writes for close at 1731515815409Writing region close event to WAL at 1731515815427 (+18 ms)Running coprocessor post-close hooks at 1731515815465 (+38 ms)Closed at 1731515815465 2024-11-13T16:36:55,470 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(157): Closed dcc0cc079b883b52e728c119e130ffd0 2024-11-13T16:36:55,473 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] handler.UnassignRegionHandler(157): Closed 147e2f1c20699f605099c511acd2060d 2024-11-13T16:36:55,475 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=dcc0cc079b883b52e728c119e130ffd0, regionState=CLOSED 2024-11-13T16:36:55,483 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=147e2f1c20699f605099c511acd2060d, regionState=CLOSED 2024-11-13T16:36:55,485 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=172, ppid=171, state=RUNNABLE, hasLock=false; CloseRegionProcedure dcc0cc079b883b52e728c119e130ffd0, server=f8f85b0bb1d8,38523,1731515529292 because future has completed 2024-11-13T16:36:55,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=170, state=RUNNABLE, hasLock=false; CloseRegionProcedure 147e2f1c20699f605099c511acd2060d, server=f8f85b0bb1d8,38437,1731515528789 because future has completed 2024-11-13T16:36:55,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-13T16:36:55,511 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=172, resume processing ppid=171 2024-11-13T16:36:55,512 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, ppid=171, state=SUCCESS, hasLock=false; CloseRegionProcedure dcc0cc079b883b52e728c119e130ffd0, server=f8f85b0bb1d8,38523,1731515529292 in 268 msec 2024-11-13T16:36:55,516 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=170 2024-11-13T16:36:55,516 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=170, state=SUCCESS, hasLock=false; CloseRegionProcedure 147e2f1c20699f605099c511acd2060d, server=f8f85b0bb1d8,38437,1731515528789 in 254 msec 2024-11-13T16:36:55,524 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=dcc0cc079b883b52e728c119e130ffd0, UNASSIGN in 297 msec 2024-11-13T16:36:55,535 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=170, resume processing ppid=169 2024-11-13T16:36:55,535 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=147e2f1c20699f605099c511acd2060d, UNASSIGN in 301 msec 2024-11-13T16:36:55,563 INFO 
[PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=169, resume processing ppid=168 2024-11-13T16:36:55,563 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=169, ppid=168, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 339 msec 2024-11-13T16:36:55,571 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515815571"}]},"ts":"1731515815571"} 2024-11-13T16:36:55,579 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-11-13T16:36:55,579 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-11-13T16:36:55,594 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 427 msec 2024-11-13T16:36:55,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-13T16:36:55,823 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-13T16:36:55,827 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testEmptyExportFileSystemState 2024-11-13T16:36:55,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=174, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-13T16:36:55,838 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=174, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-13T16:36:55,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-11-13T16:36:55,842 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=174, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-13T16:36:55,847 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38523 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-11-13T16:36:55,863 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/147e2f1c20699f605099c511acd2060d 2024-11-13T16:36:55,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-13T16:36:55,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase 
Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-13T16:36:55,870 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/dcc0cc079b883b52e728c119e130ffd0 2024-11-13T16:36:55,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-13T16:36:55,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-13T16:36:55,872 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-13T16:36:55,873 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-13T16:36:55,873 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-13T16:36:55,874 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/dcc0cc079b883b52e728c119e130ffd0/cf, FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/dcc0cc079b883b52e728c119e130ffd0/recovered.edits] 2024-11-13T16:36:55,878 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/147e2f1c20699f605099c511acd2060d/cf, FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/147e2f1c20699f605099c511acd2060d/recovered.edits] 2024-11-13T16:36:55,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-13T16:36:55,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:55,882 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data null 2024-11-13T16:36:55,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-13T16:36:55,882 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:55,882 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-13T16:36:55,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:55,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-13T16:36:55,884 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:55,885 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:55,885 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:55,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-13T16:36:55,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:55,885 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/dcc0cc079b883b52e728c119e130ffd0/cf/85443c338b3842bd935eaf5c8b41329c to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testEmptyExportFileSystemState/dcc0cc079b883b52e728c119e130ffd0/cf/85443c338b3842bd935eaf5c8b41329c 2024-11-13T16:36:55,887 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:55,891 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/dcc0cc079b883b52e728c119e130ffd0/recovered.edits/9.seqid to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testEmptyExportFileSystemState/dcc0cc079b883b52e728c119e130ffd0/recovered.edits/9.seqid 2024-11-13T16:36:55,892 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/dcc0cc079b883b52e728c119e130ffd0 2024-11-13T16:36:55,904 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/147e2f1c20699f605099c511acd2060d/cf/cf7c85924a4947639a3b37ab6a1559fa to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testEmptyExportFileSystemState/147e2f1c20699f605099c511acd2060d/cf/cf7c85924a4947639a3b37ab6a1559fa 2024-11-13T16:36:55,909 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/147e2f1c20699f605099c511acd2060d/recovered.edits/9.seqid to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testEmptyExportFileSystemState/147e2f1c20699f605099c511acd2060d/recovered.edits/9.seqid 2024-11-13T16:36:55,909 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testEmptyExportFileSystemState/147e2f1c20699f605099c511acd2060d 2024-11-13T16:36:55,910 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-11-13T16:36:55,912 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-11-13T16:36:55,913 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf] 2024-11-13T16:36:55,925 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b202411135f0ae2e3f60649c192eeeb71d3aae796_dcc0cc079b883b52e728c119e130ffd0 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b202411135f0ae2e3f60649c192eeeb71d3aae796_dcc0cc079b883b52e728c119e130ffd0 2024-11-13T16:36:55,934 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241113ab83bde84bb84c1991a0176da66a1399_147e2f1c20699f605099c511acd2060d to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241113ab83bde84bb84c1991a0176da66a1399_147e2f1c20699f605099c511acd2060d 2024-11-13T16:36:55,935 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-11-13T16:36:55,941 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=174, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-13T16:36:55,945 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-11-13T16:36:55,951 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-11-13T16:36:55,957 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=174, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-13T16:36:55,957 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states. 2024-11-13T16:36:55,957 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731515815957"}]},"ts":"9223372036854775807"} 2024-11-13T16:36:55,957 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731515815957"}]},"ts":"9223372036854775807"} 2024-11-13T16:36:55,964 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-13T16:36:55,964 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 147e2f1c20699f605099c511acd2060d, NAME => 'testtb-testEmptyExportFileSystemState,,1731515788611.147e2f1c20699f605099c511acd2060d.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => dcc0cc079b883b52e728c119e130ffd0, NAME => 'testtb-testEmptyExportFileSystemState,1,1731515788611.dcc0cc079b883b52e728c119e130ffd0.', STARTKEY => '1', ENDKEY => ''}] 2024-11-13T16:36:55,964 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 
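The records above trace DeleteTableProcedure pid=174 through its states: archiving the region and MOB store files, deleting the region rows from hbase:meta, and finally marking the table descriptor as deleted. For reference only, a minimal client-side sketch of the call that drives this procedure is shown below; the class name and the standalone main method are illustrative assumptions, and configuration is taken from whatever hbase-site.xml is on the classpath.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTestTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
                if (admin.isTableEnabled(tn)) {
                    admin.disableTable(tn);   // a table must be disabled before it can be deleted
                }
                admin.deleteTable(tn);        // the master then runs a DeleteTableProcedure like pid=174 above
            }
        }
    }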
2024-11-13T16:36:55,965 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731515815964"}]},"ts":"9223372036854775807"} 2024-11-13T16:36:55,971 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-11-13T16:36:55,976 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=174, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-13T16:36:55,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-13T16:36:55,999 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 149 msec 2024-11-13T16:36:56,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-13T16:36:56,208 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-11-13T16:36:56,209 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-13T16:36:56,232 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-13T16:36:56,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-11-13T16:36:56,239 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-13T16:36:56,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-11-13T16:36:56,279 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=802 (was 794) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1007755522_1 at /127.0.0.1:48208 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-315877880_22 at /127.0.0.1:48048 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-315877880_22 at /127.0.0.1:41784 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (565235355) connection to localhost/127.0.0.1:38409 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 102623) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1007755522_1 at /127.0.0.1:38126 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-315877880_22 at /127.0.0.1:43104 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38409 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39085 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5760 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) 
java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) - Thread LEAK? -, OpenFileDescriptor=805 (was 791) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1549 (was 1350) - SystemLoadAverage LEAK? -, ProcessCount=14 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=3966 (was 4666) 2024-11-13T16:36:56,279 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=802 is superior to 500 2024-11-13T16:36:56,309 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=802, OpenFileDescriptor=805, MaxFileDescriptor=1048576, SystemLoadAverage=1549, ProcessCount=14, AvailableMemoryMB=3964 2024-11-13T16:36:56,309 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=802 is superior to 500 2024-11-13T16:36:56,311 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T16:36:56,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=175, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-11-13T16:36:56,324 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T16:36:56,325 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 175 2024-11-13T16:36:56,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-13T16:36:56,328 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T16:36:56,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742254_1430 (size=440) 2024-11-13T16:36:56,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742254_1430 (size=440) 2024-11-13T16:36:56,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742254_1430 (size=440) 
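The create request logged above describes a single 'cf' family with IS_MOB => 'true' and MOB_THRESHOLD => '0', and the resulting table is pre-split into two regions at row key '1'. A hedged sketch of building an equivalent descriptor through the public client API follows; the Admin handle named 'admin' is assumed to have been opened as in the earlier sketch, and only the attributes visible in the logged descriptor are set explicitly (the rest keep their defaults).

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Assumes an open Admin named 'admin' (see the first sketch).
    TableName tn = TableName.valueOf("testtb-testExportWithChecksum");
    TableDescriptor td = TableDescriptorBuilder.newBuilder(tn)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)      // IS_MOB => 'true' in the logged descriptor
            .setMobThreshold(0L)      // MOB_THRESHOLD => '0': every cell is written as a MOB
            .setMaxVersions(1)        // VERSIONS => '1'
            .build())
        .build();
    byte[][] splitKeys = { Bytes.toBytes("1") };   // yields the two regions ''..'1' and '1'..'' seen above
    admin.createTable(td, splitKeys);              // the master then runs a CreateTableProcedure like pid=175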
2024-11-13T16:36:56,365 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e84a8e5fa28b91e9896dcf71c1fcc137, NAME => 'testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:36:56,366 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => f3ca86f2b19766b9aaff87f74667e279, NAME => 'testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:36:56,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-13T16:36:56,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742255_1431 (size=65) 2024-11-13T16:36:56,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742255_1431 (size=65) 2024-11-13T16:36:56,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742255_1431 (size=65) 2024-11-13T16:36:56,458 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:36:56,458 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing e84a8e5fa28b91e9896dcf71c1fcc137, disabling compactions & flushes 2024-11-13T16:36:56,458 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137. 2024-11-13T16:36:56,458 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137. 
2024-11-13T16:36:56,458 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137. after waiting 0 ms 2024-11-13T16:36:56,458 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137. 2024-11-13T16:36:56,459 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137. 2024-11-13T16:36:56,459 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for e84a8e5fa28b91e9896dcf71c1fcc137: Waiting for close lock at 1731515816458Disabling compacts and flushes for region at 1731515816458Disabling writes for close at 1731515816458Writing region close event to WAL at 1731515816458Closed at 1731515816458 2024-11-13T16:36:56,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742256_1432 (size=65) 2024-11-13T16:36:56,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742256_1432 (size=65) 2024-11-13T16:36:56,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742256_1432 (size=65) 2024-11-13T16:36:56,488 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:36:56,488 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing f3ca86f2b19766b9aaff87f74667e279, disabling compactions & flushes 2024-11-13T16:36:56,488 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279. 2024-11-13T16:36:56,488 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279. 2024-11-13T16:36:56,488 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279. after waiting 0 ms 2024-11-13T16:36:56,488 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279. 2024-11-13T16:36:56,488 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279. 
2024-11-13T16:36:56,488 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for f3ca86f2b19766b9aaff87f74667e279: Waiting for close lock at 1731515816488Disabling compacts and flushes for region at 1731515816488Disabling writes for close at 1731515816488Writing region close event to WAL at 1731515816488Closed at 1731515816488 2024-11-13T16:36:56,491 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T16:36:56,491 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731515816491"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515816491"}]},"ts":"1731515816491"} 2024-11-13T16:36:56,491 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731515816491"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515816491"}]},"ts":"1731515816491"} 2024-11-13T16:36:56,496 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-13T16:36:56,504 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T16:36:56,504 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515816504"}]},"ts":"1731515816504"} 2024-11-13T16:36:56,507 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-11-13T16:36:56,508 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {f8f85b0bb1d8=0} racks are {/default-rack=0} 2024-11-13T16:36:56,510 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T16:36:56,511 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T16:36:56,511 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T16:36:56,511 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T16:36:56,511 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T16:36:56,511 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T16:36:56,511 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T16:36:56,511 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T16:36:56,511 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T16:36:56,511 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T16:36:56,511 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=e84a8e5fa28b91e9896dcf71c1fcc137, ASSIGN}, {pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=f3ca86f2b19766b9aaff87f74667e279, ASSIGN}] 2024-11-13T16:36:56,519 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=f3ca86f2b19766b9aaff87f74667e279, ASSIGN 2024-11-13T16:36:56,519 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=e84a8e5fa28b91e9896dcf71c1fcc137, ASSIGN 2024-11-13T16:36:56,522 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=e84a8e5fa28b91e9896dcf71c1fcc137, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,38523,1731515529292; forceNewPlan=false, retain=false 2024-11-13T16:36:56,522 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=f3ca86f2b19766b9aaff87f74667e279, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,38437,1731515528789; forceNewPlan=false, retain=false 2024-11-13T16:36:56,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-13T16:36:56,672 INFO [f8f85b0bb1d8:45809 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-13T16:36:56,673 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=177 updating hbase:meta row=f3ca86f2b19766b9aaff87f74667e279, regionState=OPENING, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:36:56,673 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=176 updating hbase:meta row=e84a8e5fa28b91e9896dcf71c1fcc137, regionState=OPENING, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:36:56,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=f3ca86f2b19766b9aaff87f74667e279, ASSIGN because future has completed 2024-11-13T16:36:56,676 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE, hasLock=false; OpenRegionProcedure f3ca86f2b19766b9aaff87f74667e279, server=f8f85b0bb1d8,38437,1731515528789}] 2024-11-13T16:36:56,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=e84a8e5fa28b91e9896dcf71c1fcc137, ASSIGN because future has completed 2024-11-13T16:36:56,677 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=179, ppid=176, state=RUNNABLE, hasLock=false; OpenRegionProcedure e84a8e5fa28b91e9896dcf71c1fcc137, server=f8f85b0bb1d8,38523,1731515529292}] 2024-11-13T16:36:56,833 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137. 2024-11-13T16:36:56,833 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(7752): Opening region: {ENCODED => e84a8e5fa28b91e9896dcf71c1fcc137, NAME => 'testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137.', STARTKEY => '', ENDKEY => '1'} 2024-11-13T16:36:56,834 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137. service=AccessControlService 2024-11-13T16:36:56,834 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-13T16:36:56,834 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum e84a8e5fa28b91e9896dcf71c1fcc137 2024-11-13T16:36:56,834 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:36:56,835 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(7794): checking encryption for e84a8e5fa28b91e9896dcf71c1fcc137 2024-11-13T16:36:56,835 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(7797): checking classloading for e84a8e5fa28b91e9896dcf71c1fcc137 2024-11-13T16:36:56,837 INFO [StoreOpener-e84a8e5fa28b91e9896dcf71c1fcc137-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e84a8e5fa28b91e9896dcf71c1fcc137 2024-11-13T16:36:56,840 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279. 2024-11-13T16:36:56,840 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(7752): Opening region: {ENCODED => f3ca86f2b19766b9aaff87f74667e279, NAME => 'testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279.', STARTKEY => '1', ENDKEY => ''} 2024-11-13T16:36:56,840 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279. service=AccessControlService 2024-11-13T16:36:56,841 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-13T16:36:56,841 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum f3ca86f2b19766b9aaff87f74667e279 2024-11-13T16:36:56,841 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:36:56,841 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(7794): checking encryption for f3ca86f2b19766b9aaff87f74667e279 2024-11-13T16:36:56,841 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(7797): checking classloading for f3ca86f2b19766b9aaff87f74667e279 2024-11-13T16:36:56,843 INFO [StoreOpener-e84a8e5fa28b91e9896dcf71c1fcc137-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e84a8e5fa28b91e9896dcf71c1fcc137 columnFamilyName cf 2024-11-13T16:36:56,844 DEBUG [StoreOpener-e84a8e5fa28b91e9896dcf71c1fcc137-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:36:56,848 INFO [StoreOpener-e84a8e5fa28b91e9896dcf71c1fcc137-1 {}] regionserver.HStore(327): Store=e84a8e5fa28b91e9896dcf71c1fcc137/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:36:56,848 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1038): replaying wal for e84a8e5fa28b91e9896dcf71c1fcc137 2024-11-13T16:36:56,851 INFO [StoreOpener-f3ca86f2b19766b9aaff87f74667e279-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f3ca86f2b19766b9aaff87f74667e279 2024-11-13T16:36:56,852 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/e84a8e5fa28b91e9896dcf71c1fcc137 2024-11-13T16:36:56,855 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/e84a8e5fa28b91e9896dcf71c1fcc137 2024-11-13T16:36:56,859 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1048): stopping wal replay for e84a8e5fa28b91e9896dcf71c1fcc137 2024-11-13T16:36:56,859 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1060): Cleaning up temporary data for e84a8e5fa28b91e9896dcf71c1fcc137 2024-11-13T16:36:56,859 INFO [StoreOpener-f3ca86f2b19766b9aaff87f74667e279-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f3ca86f2b19766b9aaff87f74667e279 columnFamilyName cf 2024-11-13T16:36:56,866 DEBUG [StoreOpener-f3ca86f2b19766b9aaff87f74667e279-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:36:56,869 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1093): writing seq id for e84a8e5fa28b91e9896dcf71c1fcc137 2024-11-13T16:36:56,870 INFO [StoreOpener-f3ca86f2b19766b9aaff87f74667e279-1 {}] regionserver.HStore(327): Store=f3ca86f2b19766b9aaff87f74667e279/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:36:56,872 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1038): replaying wal for f3ca86f2b19766b9aaff87f74667e279 2024-11-13T16:36:56,875 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279 2024-11-13T16:36:56,877 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279 2024-11-13T16:36:56,880 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1048): stopping wal replay for f3ca86f2b19766b9aaff87f74667e279 2024-11-13T16:36:56,880 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1060): Cleaning up temporary data for f3ca86f2b19766b9aaff87f74667e279 2024-11-13T16:36:56,889 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/e84a8e5fa28b91e9896dcf71c1fcc137/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:36:56,890 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1114): Opened e84a8e5fa28b91e9896dcf71c1fcc137; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73961389, jitterRate=0.10211057960987091}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:36:56,890 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e84a8e5fa28b91e9896dcf71c1fcc137 2024-11-13T16:36:56,890 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1006): Region open journal for e84a8e5fa28b91e9896dcf71c1fcc137: Running coprocessor pre-open hook at 1731515816835Writing region info on filesystem at 1731515816835Initializing all the Stores at 1731515816836 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515816836Cleaning up temporary data from old regions at 1731515816859 (+23 ms)Running coprocessor post-open hooks at 1731515816890 (+31 ms)Region opened successfully at 1731515816890 2024-11-13T16:36:56,890 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1093): writing seq id for f3ca86f2b19766b9aaff87f74667e279 2024-11-13T16:36:56,893 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137., pid=179, masterSystemTime=1731515816830 2024-11-13T16:36:56,899 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137. 2024-11-13T16:36:56,899 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137. 
2024-11-13T16:36:56,902 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=176 updating hbase:meta row=e84a8e5fa28b91e9896dcf71c1fcc137, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:36:56,912 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=179, ppid=176, state=RUNNABLE, hasLock=false; OpenRegionProcedure e84a8e5fa28b91e9896dcf71c1fcc137, server=f8f85b0bb1d8,38523,1731515529292 because future has completed 2024-11-13T16:36:56,912 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:36:56,914 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1114): Opened f3ca86f2b19766b9aaff87f74667e279; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68897253, jitterRate=0.026649072766304016}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:36:56,914 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f3ca86f2b19766b9aaff87f74667e279 2024-11-13T16:36:56,914 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1006): Region open journal for f3ca86f2b19766b9aaff87f74667e279: Running coprocessor pre-open hook at 1731515816841Writing region info on filesystem at 1731515816841Initializing all the Stores at 1731515816844 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515816844Cleaning up temporary data from old regions at 1731515816880 (+36 ms)Running coprocessor post-open hooks at 1731515816914 (+34 ms)Region opened successfully at 1731515816914 2024-11-13T16:36:56,918 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279., pid=178, masterSystemTime=1731515816829 2024-11-13T16:36:56,922 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279. 2024-11-13T16:36:56,922 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279. 
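At this point both regions of testtb-testExportWithChecksum have been opened by their region servers (f8f85b0bb1d8,38523 and f8f85b0bb1d8,38437) and the master is updating their state in hbase:meta. A small illustrative sketch for confirming the assignment from the client side, again assuming an open Connection named 'conn', is:

    import java.util.List;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionLocator;

    // Assumes an open Connection named 'conn' (see the first sketch).
    try (RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testtb-testExportWithChecksum"))) {
        List<HRegionLocation> locations = locator.getAllRegionLocations();
        for (HRegionLocation loc : locations) {
            // prints the encoded region name and the server hosting it
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
        }
    }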
2024-11-13T16:36:56,924 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=177 updating hbase:meta row=f3ca86f2b19766b9aaff87f74667e279, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:36:56,926 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=179, resume processing ppid=176 2024-11-13T16:36:56,927 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=178, ppid=177, state=RUNNABLE, hasLock=false; OpenRegionProcedure f3ca86f2b19766b9aaff87f74667e279, server=f8f85b0bb1d8,38437,1731515528789 because future has completed 2024-11-13T16:36:56,929 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=175, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=e84a8e5fa28b91e9896dcf71c1fcc137, ASSIGN in 415 msec 2024-11-13T16:36:56,930 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=176, state=SUCCESS, hasLock=false; OpenRegionProcedure e84a8e5fa28b91e9896dcf71c1fcc137, server=f8f85b0bb1d8,38523,1731515529292 in 240 msec 2024-11-13T16:36:56,934 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=178, resume processing ppid=177 2024-11-13T16:36:56,934 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=177, state=SUCCESS, hasLock=false; OpenRegionProcedure f3ca86f2b19766b9aaff87f74667e279, server=f8f85b0bb1d8,38437,1731515528789 in 254 msec 2024-11-13T16:36:56,943 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=177, resume processing ppid=175 2024-11-13T16:36:56,943 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, ppid=175, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=f3ca86f2b19766b9aaff87f74667e279, ASSIGN in 424 msec 2024-11-13T16:36:56,947 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T16:36:56,947 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515816947"}]},"ts":"1731515816947"} 2024-11-13T16:36:56,955 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-11-13T16:36:56,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-13T16:36:56,962 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T16:36:56,963 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-11-13T16:36:56,975 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38523 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-13T16:36:57,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:57,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:57,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:57,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:36:57,050 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:57,051 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:57,055 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:57,055 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:57,059 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:57,059 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:57,060 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:57,060 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-13T16:36:57,064 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 738 msec 2024-11-13T16:36:57,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 
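The ACL rows written above ('jenkins: RWXCA' for the new table) are persisted by PermissionStorage and then fanned out to every region server's cache through the /hbase/acl znode, which is what the repeated ZKPermissionWatcher "Updating permissions cache" records show. For illustration only, a hedged sketch of issuing an equivalent table-wide grant through the client API is given below; the Connection named 'conn' is assumed.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    // Assumes an open Connection named 'conn'; note AccessControlClient.grant declares 'throws Throwable'.
    AccessControlClient.grant(conn,
        TableName.valueOf("testtb-testExportWithChecksum"),
        "jenkins",                    // grantee user
        null, null,                   // null family/qualifier => table-wide grant
        Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
        Permission.Action.CREATE, Permission.Action.ADMIN);   // R W X C A, as logged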
2024-11-13T16:36:57,470 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-13T16:36:57,471 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-13T16:36:57,482 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-11-13T16:36:57,483 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137. 2024-11-13T16:36:57,483 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T16:36:57,488 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-13T16:36:57,513 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-13T16:36:57,529 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-13T16:36:57,538 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-13T16:36:57,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731515817538 (current time:1731515817538). 
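The request logged above ({ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }) is what the master receives when a client asks for a flush-type snapshot. A minimal client-side sketch that would issue such a request through the public Admin API might look like the following; the snapshot and table names are copied from the log, everything else is assumed setup rather than the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the master-side SnapshotProcedure (the SNAPSHOT_PREPARE ...
          // SNAPSHOT_COMPLETE_SNAPSHOT states traced later in this log) has finished.
          admin.snapshot("emptySnaptb0-testExportWithChecksum",
              TableName.valueOf("testtb-testExportWithChecksum"),
              SnapshotType.FLUSH);
        }
      }
    }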
2024-11-13T16:36:57,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T16:36:57,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-13T16:36:57,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T16:36:57,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@667ba9aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:57,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:36:57,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:36:57,545 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:36:57,546 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:36:57,546 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:36:57,546 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e4e1bcb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:57,546 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:36:57,546 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:36:57,547 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:57,548 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35212, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:36:57,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3937eede, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:57,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:36:57,555 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:36:57,556 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:36:57,567 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54960, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:36:57,571 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809. 2024-11-13T16:36:57,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:36:57,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:57,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:57,582 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
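The connection bootstrap traced above, cluster id fetch, then "Start fetching meta region location from registry", then the resolved hbase:meta location, is what every new client connection performs before it can route requests to region servers. For illustration, the same kind of lookup can be done explicitly through a RegionLocator; the table name comes from the log, the example row key is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LocateRegion {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("testtb-testExportWithChecksum"))) {
          // Resolves the region (and its current region server) holding this row,
          // going through hbase:meta just as the locator threads in the log do.
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("1"));
          System.out.println(loc.getRegion().getRegionNameAsString()
              + " on " + loc.getServerName());
        }
      }
    }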
2024-11-13T16:36:57,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10e2293a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:57,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:36:57,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:36:57,593 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:36:57,593 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:36:57,593 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:36:57,593 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55b98348, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:57,593 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:36:57,594 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:36:57,594 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:57,596 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35232, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:36:57,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3589c23, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:57,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:36:57,598 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:36:57,599 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:36:57,600 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54964, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-13T16:36:57,603 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2] 2024-11-13T16:36:57,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:36:57,605 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41570, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:36:57,615 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809. 2024-11-13T16:36:57,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor282.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:36:57,615 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:57,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:57,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-13T16:36:57,616 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:36:57,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-13T16:36:57,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-13T16:36:57,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 180 2024-11-13T16:36:57,625 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T16:36:57,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-13T16:36:57,627 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T16:36:57,635 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T16:36:57,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-13T16:36:57,713 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-11-13T16:36:57,714 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-13T16:36:57,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-13T16:36:57,772 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742257_1433 (size=161) 2024-11-13T16:36:57,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742257_1433 (size=161) 2024-11-13T16:36:57,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742257_1433 (size=161) 2024-11-13T16:36:57,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-13T16:36:58,177 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T16:36:58,177 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e84a8e5fa28b91e9896dcf71c1fcc137}, {pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f3ca86f2b19766b9aaff87f74667e279}] 2024-11-13T16:36:58,182 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e84a8e5fa28b91e9896dcf71c1fcc137 2024-11-13T16:36:58,183 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f3ca86f2b19766b9aaff87f74667e279 2024-11-13T16:36:58,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-13T16:36:58,338 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38523 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=181 2024-11-13T16:36:58,338 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38437 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=182 2024-11-13T16:36:58,338 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137. 2024-11-13T16:36:58,338 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279. 
2024-11-13T16:36:58,338 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.HRegion(2603): Flush status journal for e84a8e5fa28b91e9896dcf71c1fcc137: 2024-11-13T16:36:58,338 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.HRegion(2603): Flush status journal for f3ca86f2b19766b9aaff87f74667e279: 2024-11-13T16:36:58,338 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137. for emptySnaptb0-testExportWithChecksum completed. 2024-11-13T16:36:58,338 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279. for emptySnaptb0-testExportWithChecksum completed. 2024-11-13T16:36:58,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-13T16:36:58,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-13T16:36:58,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:36:58,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:36:58,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T16:36:58,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T16:36:58,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742259_1435 (size=68) 2024-11-13T16:36:58,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742259_1435 (size=68) 2024-11-13T16:36:58,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742259_1435 (size=68) 2024-11-13T16:36:58,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137. 
2024-11-13T16:36:58,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=181 2024-11-13T16:36:58,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=181 2024-11-13T16:36:58,380 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region e84a8e5fa28b91e9896dcf71c1fcc137 2024-11-13T16:36:58,380 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e84a8e5fa28b91e9896dcf71c1fcc137 2024-11-13T16:36:58,385 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e84a8e5fa28b91e9896dcf71c1fcc137 in 206 msec 2024-11-13T16:36:58,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742258_1434 (size=68) 2024-11-13T16:36:58,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742258_1434 (size=68) 2024-11-13T16:36:58,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742258_1434 (size=68) 2024-11-13T16:36:58,400 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279. 
2024-11-13T16:36:58,400 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=182 2024-11-13T16:36:58,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=182 2024-11-13T16:36:58,401 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region f3ca86f2b19766b9aaff87f74667e279 2024-11-13T16:36:58,401 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f3ca86f2b19766b9aaff87f74667e279 2024-11-13T16:36:58,406 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=182, resume processing ppid=180 2024-11-13T16:36:58,406 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f3ca86f2b19766b9aaff87f74667e279 in 226 msec 2024-11-13T16:36:58,406 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T16:36:58,411 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T16:36:58,415 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-13T16:36:58,415 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T16:36:58,415 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:36:58,418 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-13T16:36:58,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742260_1436 (size=60) 2024-11-13T16:36:58,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742260_1436 (size=60) 2024-11-13T16:36:58,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742260_1436 (size=60) 2024-11-13T16:36:58,444 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T16:36:58,445 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-11-13T16:36:58,446 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-11-13T16:36:58,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742261_1437 (size=641) 2024-11-13T16:36:58,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742261_1437 (size=641) 2024-11-13T16:36:58,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742261_1437 (size=641) 2024-11-13T16:36:58,543 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T16:36:58,591 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T16:36:58,593 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-11-13T16:36:58,607 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_POST_OPERATION, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T16:36:58,607 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 180 2024-11-13T16:36:58,619 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 991 msec 2024-11-13T16:36:58,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-13T16:36:58,759 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-13T16:36:58,775 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38523 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T16:36:58,782 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38437 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T16:36:58,785 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-13T16:36:58,799 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-11-13T16:36:58,799 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137. 
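The warnings above, "writing data to region ... with WAL disabled. Data may be lost in the event of a crash.", are typically emitted when mutations arrive with durability SKIP_WAL (or the table is configured that way), presumably here so the test can load rows quickly before the next snapshot. A hedged sketch of a put issued with that durability follows; the column family cf and the table name are from the log, the row key and value are made up for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPut {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testtb-testExportWithChecksum"))) {
          Put put = new Put(Bytes.toBytes("row-0001"));  // hypothetical row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // Skip the write-ahead log: faster, but the edit is lost if the
          // region server crashes before the memstore is flushed.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }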
2024-11-13T16:36:58,800 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T16:36:58,803 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-13T16:36:58,818 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-13T16:36:58,851 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-13T16:36:58,856 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-13T16:36:58,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731515818856 (current time:1731515818856). 2024-11-13T16:36:58,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T16:36:58,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-13T16:36:58,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T16:36:58,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c9a0e20, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:58,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:36:58,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:36:58,870 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:36:58,870 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:36:58,870 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:36:58,870 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b97f395, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-11-13T16:36:58,870 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:36:58,871 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:36:58,871 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:58,873 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35256, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:36:58,874 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43f754db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:58,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:36:58,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:36:58,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:36:58,886 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54980, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:36:58,889 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809. 
2024-11-13T16:36:58,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:36:58,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:58,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:58,890 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:36:58,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2145a9d5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:58,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:36:58,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:36:58,895 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:36:58,898 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:36:58,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:36:58,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@508fdebc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:58,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:36:58,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:36:58,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:58,902 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35266, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:36:58,904 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ddd0389, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:36:58,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:36:58,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:36:58,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:36:58,910 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54988, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:36:58,913 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2] 2024-11-13T16:36:58,914 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:36:58,918 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41580, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:36:58,928 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809. 
2024-11-13T16:36:58,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor282.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:36:58,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:58,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:36:58,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-13T16:36:58,929 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:36:58,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
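Before storing pid=183 the SnapshotManager notes "No existing snapshot, attempting snapshot...", meaning no snapshot with that name is already registered. From the client side, completed snapshots can later be enumerated through the Admin API; a small sketch is below, where the name pattern simply mirrors the snapshot names in this run and the rest is assumed.

    import java.util.regex.Pattern;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListChecksumSnapshots {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Lists completed snapshots whose names match the ones taken in this run.
          for (SnapshotDescription sd :
              admin.listSnapshots(Pattern.compile(".*-testExportWithChecksum"))) {
            System.out.println(sd.getName());
          }
        }
      }
    }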
2024-11-13T16:36:58,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=183, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-13T16:36:58,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 183 2024-11-13T16:36:58,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-13T16:36:58,952 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T16:36:58,967 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T16:36:58,991 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T16:36:59,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-13T16:36:59,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742262_1438 (size=156) 2024-11-13T16:36:59,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742262_1438 (size=156) 2024-11-13T16:36:59,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742262_1438 (size=156) 2024-11-13T16:36:59,071 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T16:36:59,071 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e84a8e5fa28b91e9896dcf71c1fcc137}, {pid=185, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f3ca86f2b19766b9aaff87f74667e279}] 2024-11-13T16:36:59,075 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=185, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f3ca86f2b19766b9aaff87f74667e279 2024-11-13T16:36:59,076 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=184, ppid=183, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e84a8e5fa28b91e9896dcf71c1fcc137 2024-11-13T16:36:59,237 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38523 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=184 2024-11-13T16:36:59,238 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137. 2024-11-13T16:36:59,238 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegion(2902): Flushing e84a8e5fa28b91e9896dcf71c1fcc137 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-11-13T16:36:59,239 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38437 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=185 2024-11-13T16:36:59,241 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279. 2024-11-13T16:36:59,242 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegion(2902): Flushing f3ca86f2b19766b9aaff87f74667e279 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-11-13T16:36:59,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-13T16:36:59,360 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111368f5de6d2505447cacd9ded661cbf915_e84a8e5fa28b91e9896dcf71c1fcc137 is 71, key is 0373657a105eecd5b9fc95be33902dc7/cf:q/1731515818775/Put/seqid=0 2024-11-13T16:36:59,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411139c876eb682ab421e85e4fa4caafbd99c_f3ca86f2b19766b9aaff87f74667e279 is 71, key is 13784a23cc00a96dee77610b0a5c1c7a/cf:q/1731515818782/Put/seqid=0 2024-11-13T16:36:59,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742263_1439 (size=5311) 2024-11-13T16:36:59,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742263_1439 (size=5311) 2024-11-13T16:36:59,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742263_1439 (size=5311) 2024-11-13T16:36:59,427 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:36:59,461 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742264_1440 (size=7962) 2024-11-13T16:36:59,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742264_1440 (size=7962) 2024-11-13T16:36:59,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742264_1440 (size=7962) 2024-11-13T16:36:59,467 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111368f5de6d2505447cacd9ded661cbf915_e84a8e5fa28b91e9896dcf71c1fcc137 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024111368f5de6d2505447cacd9ded661cbf915_e84a8e5fa28b91e9896dcf71c1fcc137 2024-11-13T16:36:59,474 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:36:59,474 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/e84a8e5fa28b91e9896dcf71c1fcc137/.tmp/cf/a09772b4645e4febbb8fc74773a2fcbb, store: [table=testtb-testExportWithChecksum family=cf region=e84a8e5fa28b91e9896dcf71c1fcc137] 2024-11-13T16:36:59,475 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/e84a8e5fa28b91e9896dcf71c1fcc137/.tmp/cf/a09772b4645e4febbb8fc74773a2fcbb is 206, key is 0cf594ff6c9ed19b64f79a66316fb59d7/cf:q/1731515818775/Put/seqid=0 2024-11-13T16:36:59,500 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411139c876eb682ab421e85e4fa4caafbd99c_f3ca86f2b19766b9aaff87f74667e279 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b202411139c876eb682ab421e85e4fa4caafbd99c_f3ca86f2b19766b9aaff87f74667e279 2024-11-13T16:36:59,515 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279/.tmp/cf/732fda776387496585976f5255d4009c, store: [table=testtb-testExportWithChecksum family=cf region=f3ca86f2b19766b9aaff87f74667e279] 2024-11-13T16:36:59,515 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279/.tmp/cf/732fda776387496585976f5255d4009c is 206, key is 11ec9b0c8d859f06acb5dd1cb3a557c2f/cf:q/1731515818782/Put/seqid=0 2024-11-13T16:36:59,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742265_1441 (size=6510) 2024-11-13T16:36:59,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742265_1441 (size=6510) 2024-11-13T16:36:59,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742265_1441 (size=6510) 2024-11-13T16:36:59,563 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=400, hasBloomFilter=true, into tmp file hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/e84a8e5fa28b91e9896dcf71c1fcc137/.tmp/cf/a09772b4645e4febbb8fc74773a2fcbb 2024-11-13T16:36:59,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742266_1442 (size=14249) 2024-11-13T16:36:59,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742266_1442 (size=14249) 2024-11-13T16:36:59,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742266_1442 (size=14249) 2024-11-13T16:36:59,592 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279/.tmp/cf/732fda776387496585976f5255d4009c 2024-11-13T16:36:59,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-13T16:36:59,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/e84a8e5fa28b91e9896dcf71c1fcc137/.tmp/cf/a09772b4645e4febbb8fc74773a2fcbb as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/e84a8e5fa28b91e9896dcf71c1fcc137/cf/a09772b4645e4febbb8fc74773a2fcbb 2024-11-13T16:36:59,639 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/e84a8e5fa28b91e9896dcf71c1fcc137/cf/a09772b4645e4febbb8fc74773a2fcbb, entries=6, sequenceid=6, filesize=6.4 K 
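[Editor's note] The entries above show each SnapshotRegionCallable flushing its region's memstore ("Flushing ... 1/1 column families"), writing a temporary hfile, and committing it into the column-family directory before snapshot references are taken. For context only, here is a minimal, hedged sketch of triggering the same kind of flush explicitly through the public Admin API; the table name is taken from the log, and the connection settings are assumed to come from an hbase-site.xml on the classpath.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Assumes cluster connection details (ZooKeeper quorum, etc.) are on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Table name taken from the log above. A flush writes the in-memory edits
      // out to new store files, which is what the RS_SNAPSHOT_OPERATIONS handler
      // does implicitly for a FLUSH-type snapshot.
      admin.flush(TableName.valueOf("testtb-testExportWithChecksum"));
    }
  }
}
```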
2024-11-13T16:36:59,642 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279/.tmp/cf/732fda776387496585976f5255d4009c as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279/cf/732fda776387496585976f5255d4009c 2024-11-13T16:36:59,647 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegion(3140): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for e84a8e5fa28b91e9896dcf71c1fcc137 in 409ms, sequenceid=6, compaction requested=false 2024-11-13T16:36:59,647 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-11-13T16:36:59,649 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegion(2603): Flush status journal for e84a8e5fa28b91e9896dcf71c1fcc137: 2024-11-13T16:36:59,649 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137. for snaptb0-testExportWithChecksum completed. 2024-11-13T16:36:59,649 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-13T16:36:59,649 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:36:59,649 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/e84a8e5fa28b91e9896dcf71c1fcc137/cf/a09772b4645e4febbb8fc74773a2fcbb] hfiles 2024-11-13T16:36:59,649 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/e84a8e5fa28b91e9896dcf71c1fcc137/cf/a09772b4645e4febbb8fc74773a2fcbb for snapshot=snaptb0-testExportWithChecksum 2024-11-13T16:36:59,677 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279/cf/732fda776387496585976f5255d4009c, entries=44, sequenceid=6, filesize=13.9 K 2024-11-13T16:36:59,687 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegion(3140): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for f3ca86f2b19766b9aaff87f74667e279 in 445ms, sequenceid=6, compaction requested=false 2024-11-13T16:36:59,687 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegion(2603): Flush status journal for f3ca86f2b19766b9aaff87f74667e279: 2024-11-13T16:36:59,687 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279. for snaptb0-testExportWithChecksum completed. 2024-11-13T16:36:59,687 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-13T16:36:59,687 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:36:59,687 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279/cf/732fda776387496585976f5255d4009c] hfiles 2024-11-13T16:36:59,687 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279/cf/732fda776387496585976f5255d4009c for snapshot=snaptb0-testExportWithChecksum 2024-11-13T16:36:59,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742267_1443 (size=107) 2024-11-13T16:36:59,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742267_1443 (size=107) 2024-11-13T16:36:59,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742267_1443 (size=107) 2024-11-13T16:36:59,706 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137. 2024-11-13T16:36:59,706 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=184 2024-11-13T16:36:59,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=184 2024-11-13T16:36:59,707 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region e84a8e5fa28b91e9896dcf71c1fcc137 2024-11-13T16:36:59,707 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=184, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e84a8e5fa28b91e9896dcf71c1fcc137 2024-11-13T16:36:59,711 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=183, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e84a8e5fa28b91e9896dcf71c1fcc137 in 637 msec 2024-11-13T16:36:59,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742268_1444 (size=107) 2024-11-13T16:36:59,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742268_1444 (size=107) 2024-11-13T16:36:59,758 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279. 
2024-11-13T16:36:59,758 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=185 2024-11-13T16:36:59,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=185 2024-11-13T16:36:59,758 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region f3ca86f2b19766b9aaff87f74667e279 2024-11-13T16:36:59,759 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=185, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f3ca86f2b19766b9aaff87f74667e279 2024-11-13T16:36:59,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742268_1444 (size=107) 2024-11-13T16:36:59,783 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=185, resume processing ppid=183 2024-11-13T16:36:59,783 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, ppid=183, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f3ca86f2b19766b9aaff87f74667e279 in 690 msec 2024-11-13T16:36:59,783 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T16:36:59,785 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T16:36:59,803 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
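[Editor's note] The HMobStore/DefaultMobStoreFlusher entries earlier, and the MobRegionSnapshotPool step that begins here, only occur because the `cf` family of this table is MOB-enabled: large values are written to separate mob files under `/mobdir`, so the snapshot records mob hfile references in addition to the regular store files. As a hedged illustration of how such a family is declared, the sketch below creates a table with a MOB-enabled family; the table name `mob_demo` and the threshold value are hypothetical, while the family name `cf` matches the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Hypothetical table name; 'cf' matches the family seen in the log.
      // Values larger than the MOB threshold are stored in mob files rather
      // than inline in the regular store files.
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("mob_demo"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(1024L) // illustrative threshold, in bytes
              .build())
          .build());
    }
  }
}
```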
2024-11-13T16:36:59,803 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T16:36:59,803 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:36:59,805 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b202411139c876eb682ab421e85e4fa4caafbd99c_f3ca86f2b19766b9aaff87f74667e279, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024111368f5de6d2505447cacd9ded661cbf915_e84a8e5fa28b91e9896dcf71c1fcc137] hfiles 2024-11-13T16:36:59,806 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b202411139c876eb682ab421e85e4fa4caafbd99c_f3ca86f2b19766b9aaff87f74667e279 2024-11-13T16:36:59,806 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024111368f5de6d2505447cacd9ded661cbf915_e84a8e5fa28b91e9896dcf71c1fcc137 2024-11-13T16:36:59,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742269_1445 (size=291) 2024-11-13T16:36:59,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742269_1445 (size=291) 2024-11-13T16:36:59,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742269_1445 (size=291) 2024-11-13T16:36:59,859 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T16:36:59,859 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-11-13T16:36:59,860 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-13T16:36:59,880 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0007_000001 (auth:SIMPLE) from 127.0.0.1:54696 2024-11-13T16:36:59,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742270_1446 (size=951) 2024-11-13T16:36:59,884 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742270_1446 (size=951) 2024-11-13T16:36:59,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742270_1446 (size=951) 2024-11-13T16:36:59,909 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_1/usercache/jenkins/appcache/application_1731515545832_0007/container_1731515545832_0007_01_000001/launch_container.sh] 2024-11-13T16:36:59,909 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_1/usercache/jenkins/appcache/application_1731515545832_0007/container_1731515545832_0007_01_000001/container_tokens] 2024-11-13T16:36:59,909 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_1/usercache/jenkins/appcache/application_1731515545832_0007/container_1731515545832_0007_01_000001/sysfs] 2024-11-13T16:36:59,914 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T16:36:59,924 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T16:36:59,925 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-13T16:36:59,926 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T16:36:59,926 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 183 2024-11-13T16:36:59,928 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 994 msec 
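[Editor's note] At this point the master-side SnapshotProcedure (pid=183) has walked through SNAPSHOT_CONSOLIDATE_SNAPSHOT, SNAPSHOT_VERIFIER_SNAPSHOT and SNAPSHOT_COMPLETE_SNAPSHOT and finished with SUCCESS, i.e. snaptb0-testExportWithChecksum now lives under `.hbase-snapshot`. For reference, a minimal client-side sketch of requesting such a snapshot is shown below; the snapshot and table names are taken from the log, everything else is an assumption about the surrounding setup.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Names taken from the log above. The call returns once the master-side
      // snapshot procedure (pid=183 in this run) has completed.
      admin.snapshot("snaptb0-testExportWithChecksum",
          TableName.valueOf("testtb-testExportWithChecksum"));
    }
  }
}
```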
2024-11-13T16:36:59,994 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6d9281e5cd6679c218f14227797e5659, had cached 0 bytes from a total of 5490 2024-11-13T16:36:59,994 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 33bde9964e83af90386f782c94c4e709, had cached 0 bytes from a total of 15057 2024-11-13T16:37:00,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-13T16:37:00,110 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-13T16:37:00,110 INFO [Time-limited test {}] snapshot.TestExportSnapshot(475): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515820110 2024-11-13T16:37:00,110 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515820110, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515820110, srcFsUri=hdfs://localhost:37215, srcDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:37:00,151 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:37215, inputRoot=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:37:00,151 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=org.apache.hadoop.fs.LocalFileSystem@1d278bea, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515820110, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515820110/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-13T16:37:00,157 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 
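[Editor's note] The ExportSnapshot lines above show the test preparing to copy the completed snapshot from the HDFS source root to a local `file:///.../local-export-...` destination, starting with a verification of the source snapshot's expiration status and integrity. A hedged sketch of driving the same tool programmatically is shown below; the flag spellings follow the HBase reference guide and should be checked against the HBase version in use, and the `file:///tmp/local-export` target is a placeholder standing in for the test's local export directory.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Roughly equivalent to the documented CLI form:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot snaptb0-testExportWithChecksum -copy-to file:///tmp/local-export
    // In production the target is usually another hdfs:// root rather than file://.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "file:///tmp/local-export"
    });
    System.exit(rc);
  }
}
```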
2024-11-13T16:37:00,179 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515820110/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-13T16:37:00,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:37:00,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:37:00,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:37:01,076 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T16:37:01,533 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop-15220754205208323364.jar 2024-11-13T16:37:01,533 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:37:01,534 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:37:01,612 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop-7010511850398304353.jar 2024-11-13T16:37:01,613 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:37:01,613 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:37:01,614 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:37:01,614 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:37:01,614 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:37:01,615 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:37:01,615 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-13T16:37:01,616 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-13T16:37:01,616 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-13T16:37:01,616 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-13T16:37:01,617 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-13T16:37:01,617 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-13T16:37:01,618 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-13T16:37:01,618 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-13T16:37:01,618 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For 
class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-13T16:37:01,619 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-13T16:37:01,652 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-13T16:37:01,653 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:37:01,653 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:37:01,654 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:37:01,654 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:37:01,655 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:37:01,655 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:37:01,655 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:37:01,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742271_1447 (size=131440) 2024-11-13T16:37:01,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742271_1447 (size=131440) 2024-11-13T16:37:01,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742271_1447 (size=131440) 2024-11-13T16:37:01,864 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742272_1448 (size=4188619) 2024-11-13T16:37:01,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742272_1448 (size=4188619) 2024-11-13T16:37:01,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742272_1448 (size=4188619) 2024-11-13T16:37:01,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742273_1449 (size=1323991) 2024-11-13T16:37:01,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742273_1449 (size=1323991) 2024-11-13T16:37:01,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742273_1449 (size=1323991) 2024-11-13T16:37:01,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742274_1450 (size=903731) 2024-11-13T16:37:01,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742274_1450 (size=903731) 2024-11-13T16:37:01,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742274_1450 (size=903731) 2024-11-13T16:37:02,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742275_1451 (size=8360083) 2024-11-13T16:37:02,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742275_1451 (size=8360083) 2024-11-13T16:37:02,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742275_1451 (size=8360083) 2024-11-13T16:37:02,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742276_1452 (size=440392) 2024-11-13T16:37:02,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742276_1452 (size=440392) 2024-11-13T16:37:02,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742276_1452 (size=440392) 2024-11-13T16:37:02,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742277_1453 (size=1877034) 2024-11-13T16:37:02,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742277_1453 (size=1877034) 2024-11-13T16:37:02,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742277_1453 (size=1877034) 2024-11-13T16:37:02,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742278_1454 (size=77835) 2024-11-13T16:37:02,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742278_1454 (size=77835) 2024-11-13T16:37:02,675 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742278_1454 (size=77835) 2024-11-13T16:37:02,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742279_1455 (size=30949) 2024-11-13T16:37:02,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742279_1455 (size=30949) 2024-11-13T16:37:02,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742279_1455 (size=30949) 2024-11-13T16:37:03,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742280_1456 (size=1597327) 2024-11-13T16:37:03,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742280_1456 (size=1597327) 2024-11-13T16:37:03,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742280_1456 (size=1597327) 2024-11-13T16:37:03,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742281_1457 (size=4695811) 2024-11-13T16:37:03,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742281_1457 (size=4695811) 2024-11-13T16:37:03,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742281_1457 (size=4695811) 2024-11-13T16:37:03,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742282_1458 (size=232957) 2024-11-13T16:37:03,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742282_1458 (size=232957) 2024-11-13T16:37:03,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742282_1458 (size=232957) 2024-11-13T16:37:03,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742283_1459 (size=127628) 2024-11-13T16:37:03,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742283_1459 (size=127628) 2024-11-13T16:37:03,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742283_1459 (size=127628) 2024-11-13T16:37:03,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742284_1460 (size=20406) 2024-11-13T16:37:03,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742284_1460 (size=20406) 2024-11-13T16:37:03,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742284_1460 (size=20406) 2024-11-13T16:37:03,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742285_1461 (size=5175431) 2024-11-13T16:37:03,188 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742285_1461 (size=5175431) 2024-11-13T16:37:03,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742285_1461 (size=5175431) 2024-11-13T16:37:03,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742286_1462 (size=6424544) 2024-11-13T16:37:03,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742286_1462 (size=6424544) 2024-11-13T16:37:03,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742286_1462 (size=6424544) 2024-11-13T16:37:03,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742287_1463 (size=217634) 2024-11-13T16:37:03,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742287_1463 (size=217634) 2024-11-13T16:37:03,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742287_1463 (size=217634) 2024-11-13T16:37:03,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742288_1464 (size=1832290) 2024-11-13T16:37:03,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742288_1464 (size=1832290) 2024-11-13T16:37:03,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742288_1464 (size=1832290) 2024-11-13T16:37:03,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742289_1465 (size=322274) 2024-11-13T16:37:03,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742289_1465 (size=322274) 2024-11-13T16:37:03,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742289_1465 (size=322274) 2024-11-13T16:37:03,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742290_1466 (size=503880) 2024-11-13T16:37:03,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742290_1466 (size=503880) 2024-11-13T16:37:03,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742290_1466 (size=503880) 2024-11-13T16:37:03,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742291_1467 (size=29229) 2024-11-13T16:37:03,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742291_1467 (size=29229) 2024-11-13T16:37:03,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742291_1467 (size=29229) 
2024-11-13T16:37:03,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742292_1468 (size=24096) 2024-11-13T16:37:03,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742292_1468 (size=24096) 2024-11-13T16:37:03,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742292_1468 (size=24096) 2024-11-13T16:37:03,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742293_1469 (size=111872) 2024-11-13T16:37:03,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742293_1469 (size=111872) 2024-11-13T16:37:03,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742293_1469 (size=111872) 2024-11-13T16:37:03,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742294_1470 (size=45609) 2024-11-13T16:37:03,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742294_1470 (size=45609) 2024-11-13T16:37:03,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742294_1470 (size=45609) 2024-11-13T16:37:03,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742295_1471 (size=136454) 2024-11-13T16:37:03,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742295_1471 (size=136454) 2024-11-13T16:37:03,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742295_1471 (size=136454) 2024-11-13T16:37:03,746 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
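[Editor's note] The long run of TableMapReduceUtil "For class X, using jar Y" lines is the export job resolving which jars have to be shipped with the MapReduce job, and the JobResourceUploader warning at the end ("No job jar file set ... See Job or Job#setJar(String)") is expected in this in-process test because no user job jar is configured. A hedged sketch of the usual client-side setup that avoids that warning is shown below; the job name is hypothetical.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class JobJarSetupExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "example-job"); // hypothetical job name
    // Avoids the "No job jar file set" warning by pointing the job at the jar
    // that contains this class.
    job.setJarByClass(JobJarSetupExample.class);
    // Adds the HBase client-side dependency jars (the ones enumerated in the
    // TableMapReduceUtil log lines) to the job's distributed cache.
    TableMapReduceUtil.addDependencyJars(job);
    System.out.println("tmpjars=" + job.getConfiguration().get("tmpjars"));
  }
}
```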
2024-11-13T16:37:03,753 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-13T16:37:03,762 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=33.2 K 2024-11-13T16:37:03,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742296_1472 (size=714) 2024-11-13T16:37:03,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742296_1472 (size=714) 2024-11-13T16:37:03,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742296_1472 (size=714) 2024-11-13T16:37:03,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742297_1473 (size=15) 2024-11-13T16:37:03,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742297_1473 (size=15) 2024-11-13T16:37:03,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742297_1473 (size=15) 2024-11-13T16:37:03,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742298_1474 (size=303774) 2024-11-13T16:37:03,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742298_1474 (size=303774) 2024-11-13T16:37:03,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742298_1474 (size=303774) 2024-11-13T16:37:04,119 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T16:37:04,119 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T16:37:04,891 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0008_000001 (auth:SIMPLE) from 127.0.0.1:44888 2024-11-13T16:37:05,909 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
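[Editor's note] The span above starts with ExportSnapshot loading the snapshot's hfile list and planning a single export split of about 33 K; the remaining lines are routine block reports, YARN scheduler warnings, and the known HBASE-27595 test-harness note. For completeness, a small hedged sketch of confirming from a client which snapshots exist (and which tables they cover) before exporting is given below; no names beyond those already in the log are assumed.

```java
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Prints every snapshot known to the master, e.g.
      // "snaptb0-testExportWithChecksum on testtb-testExportWithChecksum".
      List<SnapshotDescription> snapshots = admin.listSnapshots();
      for (SnapshotDescription sd : snapshots) {
        System.out.println(sd.getName() + " on " + sd.getTableName());
      }
    }
  }
}
```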
2024-11-13T16:37:08,996 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=662.40 KB, freeSize=879.35 MB, max=880 MB, blockCount=2, accesses=2, hits=0, hitRatio=0, cachingAccesses=2, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-11-13T16:37:09,219 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-11-13T16:37:09,364 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-11-13T16:37:09,527 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=2, created chunk count=10, reused chunk count=20, reuseRatio=66.67% 2024-11-13T16:37:09,528 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-13T16:37:13,352 INFO [regionserver/f8f85b0bb1d8:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-11-13T16:37:13,410 INFO [regionserver/f8f85b0bb1d8:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-11-13T16:37:13,433 INFO [regionserver/f8f85b0bb1d8:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-11-13T16:37:16,292 INFO [regionserver/f8f85b0bb1d8:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:meta,,1.1588230740 because 1588230740/ns has an old edit so flush to free WALs after random delay 99791 ms 2024-11-13T16:37:18,014 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0008_000001 (auth:SIMPLE) from 127.0.0.1:55148 2024-11-13T16:37:18,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742299_1475 (size=349424) 2024-11-13T16:37:18,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742299_1475 (size=349424) 2024-11-13T16:37:18,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742299_1475 (size=349424) 2024-11-13T16:37:20,592 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T16:37:20,731 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0008_000001 (auth:SIMPLE) from 127.0.0.1:48408 2024-11-13T16:37:20,777 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 
f3ca86f2b19766b9aaff87f74667e279 changed from -1.0 to 0.0, refreshing cache 2024-11-13T16:37:20,778 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region e84a8e5fa28b91e9896dcf71c1fcc137 changed from -1.0 to 0.0, refreshing cache 2024-11-13T16:37:20,783 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testtb-testExportWithChecksum because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-11-13T16:37:20,783 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testExportExpiredSnapshot because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-11-13T16:37:20,792 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T16:37:20,817 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(138): Balancing RSGroup=default 2024-11-13T16:37:20,817 INFO [master/f8f85b0bb1d8:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(151): Start Generate Balance plan for group: default 2024-11-13T16:37:20,817 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.BaseLoadBalancer(619): Start Generate Balance plan for cluster. 2024-11-13T16:37:20,818 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.BalancerClusterState(204): Hosts are {f8f85b0bb1d8=0} racks are {/default-rack=0} 2024-11-13T16:37:20,828 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.BalancerClusterState(303): server 0 has 1 regions 2024-11-13T16:37:20,828 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.BalancerClusterState(303): server 1 has 3 regions 2024-11-13T16:37:20,828 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.BalancerClusterState(303): server 2 has 2 regions 2024-11-13T16:37:20,828 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T16:37:20,828 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T16:37:20,828 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T16:37:20,828 INFO [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T16:37:20,829 INFO [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T16:37:20,829 INFO [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T16:37:20,829 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.BalancerClusterState(326): Number of tables=4, number of hosts=1, number of racks=1 2024-11-13T16:37:20,832 INFO [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.StochasticLoadBalancer(403): Cluster wide - Calculating plan. may take up to 30000ms to complete. 
2024-11-13T16:37:20,839 INFO [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.StochasticLoadBalancer(515): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.25153287603160346, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.2886751345948129, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.8212752852375836, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8473024055177883, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=14400 2024-11-13T16:37:21,466 INFO [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.StochasticLoadBalancer(562): Finished computing new moving plan. Computation took 636 ms to try 14400 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.25153287603160346 to a new imbalance of 0.01566648290023645. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.16666666666666666, need balance); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.8212752852375836, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8473024055177883, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-13T16:37:21,473 INFO [master/f8f85b0bb1d8:0.Chore.1 {}] master.HMaster(2167): Balancer plans size is 1, the balance interval is 300000 ms, and the max number regions in transition is 6 2024-11-13T16:37:21,474 INFO [master/f8f85b0bb1d8:0.Chore.1 {}] master.HMaster(2172): balance hri=618c30cdb779cfeb9cea6e5be1e51395, source=f8f85b0bb1d8,38523,1731515529292, destination=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:37:21,476 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] procedure2.ProcedureExecutor(1139): Stored pid=186, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=618c30cdb779cfeb9cea6e5be1e51395, REOPEN/MOVE 2024-11-13T16:37:21,476 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=618c30cdb779cfeb9cea6e5be1e51395, REOPEN/MOVE 2024-11-13T16:37:21,477 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=186 updating hbase:meta row=618c30cdb779cfeb9cea6e5be1e51395, regionState=CLOSING, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:37:21,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=186, 
state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=618c30cdb779cfeb9cea6e5be1e51395, REOPEN/MOVE because future has completed 2024-11-13T16:37:21,484 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T16:37:21,484 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=187, ppid=186, state=RUNNABLE, hasLock=false; CloseRegionProcedure 618c30cdb779cfeb9cea6e5be1e51395, server=f8f85b0bb1d8,38523,1731515529292}] 2024-11-13T16:37:21,647 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=187}] handler.UnassignRegionHandler(122): Close 618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:37:21,648 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=187}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T16:37:21,648 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=187}] regionserver.HRegion(1722): Closing 618c30cdb779cfeb9cea6e5be1e51395, disabling compactions & flushes 2024-11-13T16:37:21,648 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=187}] regionserver.HRegion(1755): Closing region hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. 2024-11-13T16:37:21,648 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=187}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. 2024-11-13T16:37:21,648 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=187}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. after waiting 0 ms 2024-11-13T16:37:21,648 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=187}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. 
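
The pair of StochasticLoadBalancer entries above (16:37:20,839 and 16:37:21,466) report a weighted average imbalance before and after the plan search. The figures are consistent with a simple rule: sum multiplier × imbalance over the listed cost functions and divide by the sum of the multipliers. A small sketch that reproduces both logged values under that assumption:

    // Recompute the balancer's "weighted average imbalance" from the
    // multiplier/imbalance pairs logged above. The averaging rule
    // (sum(multiplier * imbalance) / sum(multiplier)) is inferred from the
    // logged numbers rather than quoted from HBase source.
    public class BalancerImbalanceCheck {
        static double weightedAverage(double[][] costs) {
            double weighted = 0, multipliers = 0;
            for (double[] c : costs) {   // c[0] = multiplier, c[1] = imbalance
                weighted += c[0] * c[1];
                multipliers += c[0];
            }
            return weighted / multipliers;
        }

        public static void main(String[] args) {
            double[][] initial = {
                {500, 0.2886751345948129},   // RegionCountSkewCostFunction
                {7, 0.0},                    // MoveCostFunction
                {25, 0.0},                   // ServerLocalityCostFunction
                {15, 0.0},                   // RackLocalityCostFunction
                {35, 0.0},                   // TableSkewCostFunction
                {5, 0.8212752852375836},     // ReadRequestCostFunction
                {5, 0.0},                    // CPRequestCostFunction
                {5, 0.8473024055177883},     // WriteRequestCostFunction
                {5, 0.0},                    // MemStoreSizeCostFunction
                {5, 0.0},                    // StoreFileCostFunction
            };
            // Prints ~0.251532876..., matching the initial imbalance above. Swapping in
            // the post-plan values (RegionCountSkew 0.0, MoveCostFunction 0.1666...)
            // gives ~0.015666483, matching the new imbalance.
            System.out.println(weightedAverage(initial));
        }
    }
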
2024-11-13T16:37:21,648 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=187}] regionserver.HRegion(2902): Flushing 618c30cdb779cfeb9cea6e5be1e51395 1/1 column families, dataSize=1.19 KB heapSize=2.92 KB 2024-11-13T16:37:21,708 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/acl/618c30cdb779cfeb9cea6e5be1e51395/.tmp/l/c02cd3fcd0fe4ef5881c247913870981 is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1731515770172/DeleteFamily/seqid=0 2024-11-13T16:37:21,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742300_1476 (size=5642) 2024-11-13T16:37:21,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742300_1476 (size=5642) 2024-11-13T16:37:21,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742300_1476 (size=5642) 2024-11-13T16:37:21,833 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=187}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.19 KB at sequenceid=24 (bloomFilter=false), to=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/acl/618c30cdb779cfeb9cea6e5be1e51395/.tmp/l/c02cd3fcd0fe4ef5881c247913870981 2024-11-13T16:37:21,854 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=187}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c02cd3fcd0fe4ef5881c247913870981 2024-11-13T16:37:21,862 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=187}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/acl/618c30cdb779cfeb9cea6e5be1e51395/.tmp/l/c02cd3fcd0fe4ef5881c247913870981 as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/acl/618c30cdb779cfeb9cea6e5be1e51395/l/c02cd3fcd0fe4ef5881c247913870981 2024-11-13T16:37:21,890 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=187}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c02cd3fcd0fe4ef5881c247913870981 2024-11-13T16:37:21,890 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=187}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/acl/618c30cdb779cfeb9cea6e5be1e51395/l/c02cd3fcd0fe4ef5881c247913870981, entries=11, sequenceid=24, filesize=5.5 K 2024-11-13T16:37:21,901 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=187}] regionserver.HRegion(3140): Finished flush of dataSize ~1.19 KB/1222, heapSize ~2.91 KB/2976, currentSize=0 B/0 for 618c30cdb779cfeb9cea6e5be1e51395 in 251ms, sequenceid=24, compaction requested=false 2024-11-13T16:37:21,983 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=187}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/acl/618c30cdb779cfeb9cea6e5be1e51395/recovered.edits/27.seqid, newMaxSeqId=27, maxSeqId=1 2024-11-13T16:37:21,985 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=187}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:37:21,985 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=187}] regionserver.HRegion(1973): Closed hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. 2024-11-13T16:37:21,985 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=187}] regionserver.HRegion(1676): Region close journal for 618c30cdb779cfeb9cea6e5be1e51395: Waiting for close lock at 1731515841648Running coprocessor pre-close hooks at 1731515841648Disabling compacts and flushes for region at 1731515841648Disabling writes for close at 1731515841648Obtaining lock to block concurrent updates at 1731515841648Preparing flush snapshotting stores in 618c30cdb779cfeb9cea6e5be1e51395 at 1731515841648Finished memstore snapshotting hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., syncing WAL and waiting on mvcc, flushsize=dataSize=1222, getHeapSize=2976, getOffHeapSize=0, getCellsCount=20 at 1731515841649 (+1 ms)Flushing stores of hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. at 1731515841654 (+5 ms)Flushing 618c30cdb779cfeb9cea6e5be1e51395/l: creating writer at 1731515841654Flushing 618c30cdb779cfeb9cea6e5be1e51395/l: appending metadata at 1731515841707 (+53 ms)Flushing 618c30cdb779cfeb9cea6e5be1e51395/l: closing flushed file at 1731515841707Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@53cf2602: reopening flushed file at 1731515841856 (+149 ms)Finished flush of dataSize ~1.19 KB/1222, heapSize ~2.91 KB/2976, currentSize=0 B/0 for 618c30cdb779cfeb9cea6e5be1e51395 in 251ms, sequenceid=24, compaction requested=false at 1731515841901 (+45 ms)Writing region close event to WAL at 1731515841913 (+12 ms)Running coprocessor post-close hooks at 1731515841985 (+72 ms)Closed at 1731515841985 2024-11-13T16:37:21,986 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=187}] regionserver.HRegionServer(3302): Adding 618c30cdb779cfeb9cea6e5be1e51395 move to f8f85b0bb1d8,38437,1731515528789 record at close sequenceid=24 2024-11-13T16:37:21,990 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=187}] handler.UnassignRegionHandler(157): Closed 618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:37:21,991 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=186 updating hbase:meta row=618c30cdb779cfeb9cea6e5be1e51395, regionState=CLOSED 2024-11-13T16:37:22,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=187, ppid=186, state=RUNNABLE, hasLock=false; CloseRegionProcedure 618c30cdb779cfeb9cea6e5be1e51395, server=f8f85b0bb1d8,38523,1731515529292 because future has completed 2024-11-13T16:37:22,020 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=187, resume processing ppid=186 2024-11-13T16:37:22,020 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=186, state=SUCCESS, hasLock=false; CloseRegionProcedure 618c30cdb779cfeb9cea6e5be1e51395, server=f8f85b0bb1d8,38523,1731515529292 in 531 msec 
2024-11-13T16:37:22,022 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=186, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=618c30cdb779cfeb9cea6e5be1e51395, REOPEN/MOVE; state=CLOSED, location=f8f85b0bb1d8,38437,1731515528789; forceNewPlan=false, retain=false 2024-11-13T16:37:22,174 INFO [f8f85b0bb1d8:45809 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-13T16:37:22,175 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=186 updating hbase:meta row=618c30cdb779cfeb9cea6e5be1e51395, regionState=OPENING, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:37:22,182 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=186, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=618c30cdb779cfeb9cea6e5be1e51395, REOPEN/MOVE because future has completed 2024-11-13T16:37:22,182 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=188, ppid=186, state=RUNNABLE, hasLock=false; OpenRegionProcedure 618c30cdb779cfeb9cea6e5be1e51395, server=f8f85b0bb1d8,38437,1731515528789}] 2024-11-13T16:37:22,387 INFO [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=188}] handler.AssignRegionHandler(132): Open hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. 2024-11-13T16:37:22,387 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=188}] regionserver.HRegion(7752): Opening region: {ENCODED => 618c30cdb779cfeb9cea6e5be1e51395, NAME => 'hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395.', STARTKEY => '', ENDKEY => ''} 2024-11-13T16:37:22,387 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=188}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. service=AccessControlService 2024-11-13T16:37:22,388 INFO [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=188}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
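
Procedure pid=186 above is the REOPEN/MOVE transition the balancer requested for the hbase:acl region: close on f8f85b0bb1d8,38523 and reopen on f8f85b0bb1d8,38437. The same transition can be requested directly through the client Admin API; a minimal sketch (the move(byte[], ServerName) signature is assumed from recent HBase client releases, identifiers are copied from the log):

    // Ask the master to move the hbase:acl region to the destination server,
    // which drives the same TransitRegionStateProcedure seen above for pid=186.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MoveAclRegion {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Encoded region name and destination taken from the entries above.
                admin.move(Bytes.toBytes("618c30cdb779cfeb9cea6e5be1e51395"),
                           ServerName.valueOf("f8f85b0bb1d8,38437,1731515528789"));
            }
        }
    }
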
2024-11-13T16:37:22,388 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=188}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:37:22,388 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=188}] regionserver.HRegion(898): Instantiated hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:37:22,388 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=188}] regionserver.HRegion(7794): checking encryption for 618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:37:22,388 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=188}] regionserver.HRegion(7797): checking classloading for 618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:37:22,406 INFO [StoreOpener-618c30cdb779cfeb9cea6e5be1e51395-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:37:22,408 INFO [StoreOpener-618c30cdb779cfeb9cea6e5be1e51395-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 618c30cdb779cfeb9cea6e5be1e51395 columnFamilyName l 2024-11-13T16:37:22,408 DEBUG [StoreOpener-618c30cdb779cfeb9cea6e5be1e51395-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:37:22,478 INFO [StoreFileOpener-618c30cdb779cfeb9cea6e5be1e51395-l-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c02cd3fcd0fe4ef5881c247913870981 2024-11-13T16:37:22,479 DEBUG [StoreOpener-618c30cdb779cfeb9cea6e5be1e51395-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/acl/618c30cdb779cfeb9cea6e5be1e51395/l/c02cd3fcd0fe4ef5881c247913870981 2024-11-13T16:37:22,479 INFO [StoreOpener-618c30cdb779cfeb9cea6e5be1e51395-1 {}] regionserver.HStore(327): Store=618c30cdb779cfeb9cea6e5be1e51395/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:37:22,479 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=188}] regionserver.HRegion(1038): replaying wal for 618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:37:22,482 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 
{event_type=M_RS_OPEN_PRIORITY_REGION, pid=188}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/acl/618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:37:22,494 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=188}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/acl/618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:37:22,495 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=188}] regionserver.HRegion(1048): stopping wal replay for 618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:37:22,495 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=188}] regionserver.HRegion(1060): Cleaning up temporary data for 618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:37:22,515 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=188}] regionserver.HRegion(1093): writing seq id for 618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:37:22,523 INFO [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=188}] regionserver.HRegion(1114): Opened 618c30cdb779cfeb9cea6e5be1e51395; next sequenceid=28; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59910738, jitterRate=-0.10726043581962585}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:37:22,523 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=188}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:37:22,537 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=188}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-13T16:37:22,538 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=188}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-13T16:37:22,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-11-13T16:37:22,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-11-13T16:37:22,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-11-13T16:37:22,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-11-13T16:37:22,611 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:37:22,615 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:37:22,615 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:37:22,622 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:37:22,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-13T16:37:22,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-13T16:37:22,632 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=188}] regionserver.HRegion(1006): Region open journal for 618c30cdb779cfeb9cea6e5be1e51395: Running coprocessor pre-open hook at 1731515842388Writing region info on filesystem at 1731515842388Initializing all the Stores at 1731515842402 (+14 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731515842403 (+1 ms)Cleaning up temporary data from old regions at 1731515842495 (+92 ms)Running coprocessor post-open hooks at 1731515842523 (+28 ms)Region opened successfully at 1731515842632 (+109 ms) 2024-11-13T16:37:22,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-13T16:37:22,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-13T16:37:22,634 INFO [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=188}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., pid=188, masterSystemTime=1731515842348 2024-11-13T16:37:22,634 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): 
Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-13T16:37:22,635 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-13T16:37:22,638 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=188}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. 2024-11-13T16:37:22,639 INFO [RS_OPEN_PRIORITY_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=188}] handler.AssignRegionHandler(153): Opened hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. 2024-11-13T16:37:22,639 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-13T16:37:22,643 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-13T16:37:22,643 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=186 updating hbase:meta row=618c30cdb779cfeb9cea6e5be1e51395, regionState=OPEN, openSeqNum=28, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:37:22,651 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=188, ppid=186, state=RUNNABLE, hasLock=false; OpenRegionProcedure 618c30cdb779cfeb9cea6e5be1e51395, server=f8f85b0bb1d8,38437,1731515528789 because future has completed 2024-11-13T16:37:22,665 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=188, resume processing ppid=186 2024-11-13T16:37:22,665 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, ppid=186, state=SUCCESS, hasLock=false; OpenRegionProcedure 618c30cdb779cfeb9cea6e5be1e51395, server=f8f85b0bb1d8,38437,1731515528789 in 471 msec 2024-11-13T16:37:22,673 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=618c30cdb779cfeb9cea6e5be1e51395, REOPEN/MOVE in 1.1910 sec 2024-11-13T16:37:22,686 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] master.HMaster(2203): Balancer is going into sleep until next period in 300000ms 2024-11-13T16:37:25,358 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-11-13T16:37:27,491 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T16:37:31,703 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_2/usercache/jenkins/appcache/application_1731515545832_0008/container_1731515545832_0008_01_000002/launch_container.sh] 2024-11-13T16:37:31,703 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_2/usercache/jenkins/appcache/application_1731515545832_0008/container_1731515545832_0008_01_000002/container_tokens] 2024-11-13T16:37:31,703 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_2/usercache/jenkins/appcache/application_1731515545832_0008/container_1731515545832_0008_01_000002/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279/cf/732fda776387496585976f5255d4009c and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515820110/archive/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279/cf/732fda776387496585976f5255d4009c. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:599) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:335) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:257) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:181) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-13T16:37:33,266 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0008_000001 (auth:SIMPLE) from 127.0.0.1:35092 2024-11-13T16:37:35,909 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
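
The IOException above comes from ExportSnapshot's post-copy verification: it compares a file-level checksum obtained from the source FileSystem (HDFS) with one from the destination (local file:/), and the two filesystems do not produce comparable checksums. A rough illustration of that kind of comparison, not the actual verifyCopyResult code:

    // Illustrative comparison of FileSystem-level checksums across two
    // filesystems. HDFS returns a CRC-based FileChecksum; a local fs may return
    // null or an incompatible type, which is what the error message above is about.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileChecksum;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ChecksumCompare {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            Path src = new Path(args[0]);   // e.g. an hdfs:// store file
            Path dst = new Path(args[1]);   // e.g. the file:/ copy
            FileChecksum a = src.getFileSystem(conf).getFileChecksum(src);
            FileChecksum b = dst.getFileSystem(conf).getFileChecksum(dst);
            if (a == null || b == null || !a.equals(b)) {
                throw new java.io.IOException("Checksum mismatch between " + src + " and " + dst);
            }
            System.out.println("Checksums match: " + a);
        }
    }
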
2024-11-13T16:37:39,294 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_2/usercache/jenkins/appcache/application_1731515545832_0008/container_1731515545832_0008_01_000003/launch_container.sh] 2024-11-13T16:37:39,296 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_2/usercache/jenkins/appcache/application_1731515545832_0008/container_1731515545832_0008_01_000003/container_tokens] 2024-11-13T16:37:39,296 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_2/usercache/jenkins/appcache/application_1731515545832_0008/container_1731515545832_0008_01_000003/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279/cf/732fda776387496585976f5255d4009c and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515820110/archive/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279/cf/732fda776387496585976f5255d4009c. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:599) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:335) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:257) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:181) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-13T16:37:40,314 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0008_000001 (auth:SIMPLE) from 127.0.0.1:33230 2024-11-13T16:37:41,835 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region e84a8e5fa28b91e9896dcf71c1fcc137, had cached 0 bytes from a total of 6510 2024-11-13T16:37:41,841 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region f3ca86f2b19766b9aaff87f74667e279, had cached 0 bytes from a total of 14249 2024-11-13T16:37:44,994 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6d9281e5cd6679c218f14227797e5659, had cached 0 bytes from a total of 5490 2024-11-13T16:37:44,995 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 33bde9964e83af90386f782c94c4e709, had cached 0 bytes from a total of 15057 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279/cf/732fda776387496585976f5255d4009c and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/local-export-1731515820110/archive/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279/cf/732fda776387496585976f5255d4009c. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:599) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:335) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:257) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:181) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-13T16:37:49,357 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0008_000001 (auth:SIMPLE) from 127.0.0.1:34202 2024-11-13T16:37:53,248 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_0/usercache/jenkins/appcache/application_1731515545832_0008/container_1731515545832_0008_01_000004/launch_container.sh] 2024-11-13T16:37:53,248 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_0/usercache/jenkins/appcache/application_1731515545832_0008/container_1731515545832_0008_01_000004/container_tokens] 2024-11-13T16:37:53,248 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_0/usercache/jenkins/appcache/application_1731515545832_0008/container_1731515545832_0008_01_000004/sysfs] 2024-11-13T16:37:56,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742301_1477 (size=21331) 2024-11-13T16:37:56,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742301_1477 (size=21331) 2024-11-13T16:37:56,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742301_1477 (size=21331) 2024-11-13T16:37:56,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742302_1478 (size=460) 2024-11-13T16:37:56,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742302_1478 (size=460) 2024-11-13T16:37:56,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742302_1478 (size=460) 2024-11-13T16:37:56,560 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false 
for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-1_0/usercache/jenkins/appcache/application_1731515545832_0008/container_1731515545832_0008_01_000005/launch_container.sh] 2024-11-13T16:37:56,561 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-1_0/usercache/jenkins/appcache/application_1731515545832_0008/container_1731515545832_0008_01_000005/container_tokens] 2024-11-13T16:37:56,561 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-1_0/usercache/jenkins/appcache/application_1731515545832_0008/container_1731515545832_0008_01_000005/sysfs] 2024-11-13T16:37:56,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742303_1479 (size=21331) 2024-11-13T16:37:56,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742303_1479 (size=21331) 2024-11-13T16:37:56,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742303_1479 (size=21331) 2024-11-13T16:37:56,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742304_1480 (size=349424) 2024-11-13T16:37:56,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742304_1480 (size=349424) 2024-11-13T16:37:56,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742304_1480 (size=349424) 2024-11-13T16:37:56,744 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0008_000001 (auth:SIMPLE) from 127.0.0.1:43454 2024-11-13T16:37:58,037 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1230): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1731515545832_0008_m_000000 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:938) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1207) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:522) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:352) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:237) ~[test-classes/:?] 
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
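
The export ultimately fails with ExportSnapshotException once the retried map task exhausts its attempts. The error text above suggests two remedies: file-level CRC comparison via dfs.checksum.combine.mode=COMPOSITE_CRC, or skipping verification with -no-checksum-verify. A sketch of re-running the export with those options through ToolRunner, the same entry point the test's stack trace shows; the snapshot name is the one used elsewhere in this log and the destination URI is a placeholder:

    // Re-run the snapshot export with the remedies suggested by the IOException.
    // Destination URI below is a placeholder, not the path from this run.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ReExportSnapshot {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Remedy 1: compare composite CRCs so HDFS and local checksums line up.
            conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
            int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
                "-snapshot", "snaptb0-testExportWithChecksum",
                "-copy-to", "file:///tmp/local-export",   // placeholder destination
                // Remedy 2 (alternative): add "-no-checksum-verify" to skip verification.
            });
            System.exit(rc);
        }
    }
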
2024-11-13T16:37:58,039 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515878039 2024-11-13T16:37:58,039 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:37215, tgtDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515878039, rawTgtDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515878039, srcFsUri=hdfs://localhost:37215, srcDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:37:58,093 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:37215, inputRoot=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:37:58,093 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-315877880_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515878039, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515878039/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-13T16:37:58,097 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-13T16:37:58,158 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515878039/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-13T16:37:58,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742305_1481 (size=156) 2024-11-13T16:37:58,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742305_1481 (size=156) 2024-11-13T16:37:58,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742305_1481 (size=156) 2024-11-13T16:37:58,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742306_1482 (size=951) 2024-11-13T16:37:58,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742306_1482 (size=951) 2024-11-13T16:37:58,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742306_1482 (size=951) 2024-11-13T16:37:58,306 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:37:58,306 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:37:58,307 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:37:59,474 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop-11936389014083734791.jar 2024-11-13T16:37:59,475 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:37:59,475 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:37:59,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop-13221983238670893985.jar 2024-11-13T16:37:59,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:37:59,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:37:59,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:37:59,558 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:37:59,558 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:37:59,558 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:37:59,561 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-13T16:37:59,561 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-13T16:37:59,561 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-13T16:37:59,562 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-13T16:37:59,562 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-13T16:37:59,562 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-13T16:37:59,562 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-13T16:37:59,563 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-13T16:37:59,564 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-13T16:37:59,564 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-13T16:37:59,564 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-13T16:37:59,565 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-11-13T16:37:59,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:37:59,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:37:59,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:37:59,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:37:59,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:37:59,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:37:59,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742307_1483 (size=131440) 2024-11-13T16:37:59,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742307_1483 (size=131440) 2024-11-13T16:37:59,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742307_1483 (size=131440) 2024-11-13T16:37:59,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742308_1484 (size=4188619) 2024-11-13T16:37:59,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742308_1484 (size=4188619) 2024-11-13T16:37:59,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742308_1484 (size=4188619) 2024-11-13T16:37:59,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742309_1485 (size=1323991) 2024-11-13T16:37:59,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742309_1485 (size=1323991) 2024-11-13T16:38:00,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742309_1485 (size=1323991) 2024-11-13T16:38:00,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:46053 is added to blk_1073742310_1486 (size=903731) 2024-11-13T16:38:00,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742310_1486 (size=903731) 2024-11-13T16:38:00,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742310_1486 (size=903731) 2024-11-13T16:38:00,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742311_1487 (size=8360083) 2024-11-13T16:38:00,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742311_1487 (size=8360083) 2024-11-13T16:38:00,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742311_1487 (size=8360083) 2024-11-13T16:38:00,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742312_1488 (size=6424544) 2024-11-13T16:38:00,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742312_1488 (size=6424544) 2024-11-13T16:38:00,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742312_1488 (size=6424544) 2024-11-13T16:38:01,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742313_1489 (size=1877034) 2024-11-13T16:38:01,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742313_1489 (size=1877034) 2024-11-13T16:38:01,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742313_1489 (size=1877034) 2024-11-13T16:38:01,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742314_1490 (size=77835) 2024-11-13T16:38:01,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742314_1490 (size=77835) 2024-11-13T16:38:01,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742314_1490 (size=77835) 2024-11-13T16:38:01,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742315_1491 (size=30949) 2024-11-13T16:38:01,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742315_1491 (size=30949) 2024-11-13T16:38:01,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742315_1491 (size=30949) 2024-11-13T16:38:01,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742316_1492 (size=1597327) 2024-11-13T16:38:01,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742316_1492 (size=1597327) 2024-11-13T16:38:01,717 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742316_1492 (size=1597327) 2024-11-13T16:38:01,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742317_1493 (size=4695811) 2024-11-13T16:38:01,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742317_1493 (size=4695811) 2024-11-13T16:38:01,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742317_1493 (size=4695811) 2024-11-13T16:38:01,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742318_1494 (size=232957) 2024-11-13T16:38:01,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742318_1494 (size=232957) 2024-11-13T16:38:01,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742318_1494 (size=232957) 2024-11-13T16:38:02,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742319_1495 (size=127628) 2024-11-13T16:38:02,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742319_1495 (size=127628) 2024-11-13T16:38:02,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742319_1495 (size=127628) 2024-11-13T16:38:02,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742320_1496 (size=20406) 2024-11-13T16:38:02,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742320_1496 (size=20406) 2024-11-13T16:38:02,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742320_1496 (size=20406) 2024-11-13T16:38:02,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742321_1497 (size=5175431) 2024-11-13T16:38:02,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742321_1497 (size=5175431) 2024-11-13T16:38:02,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742321_1497 (size=5175431) 2024-11-13T16:38:02,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742322_1498 (size=217634) 2024-11-13T16:38:02,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742322_1498 (size=217634) 2024-11-13T16:38:02,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742322_1498 (size=217634) 2024-11-13T16:38:02,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742323_1499 (size=1832290) 2024-11-13T16:38:02,244 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742323_1499 (size=1832290) 2024-11-13T16:38:02,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742323_1499 (size=1832290) 2024-11-13T16:38:02,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742324_1500 (size=322274) 2024-11-13T16:38:02,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742324_1500 (size=322274) 2024-11-13T16:38:02,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742324_1500 (size=322274) 2024-11-13T16:38:02,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742325_1501 (size=503880) 2024-11-13T16:38:02,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742325_1501 (size=503880) 2024-11-13T16:38:02,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742325_1501 (size=503880) 2024-11-13T16:38:02,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742326_1502 (size=29229) 2024-11-13T16:38:02,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742326_1502 (size=29229) 2024-11-13T16:38:02,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742326_1502 (size=29229) 2024-11-13T16:38:02,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742327_1503 (size=440392) 2024-11-13T16:38:02,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742327_1503 (size=440392) 2024-11-13T16:38:02,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742327_1503 (size=440392) 2024-11-13T16:38:02,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742328_1504 (size=24096) 2024-11-13T16:38:02,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742328_1504 (size=24096) 2024-11-13T16:38:02,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742328_1504 (size=24096) 2024-11-13T16:38:02,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742329_1505 (size=111872) 2024-11-13T16:38:02,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742329_1505 (size=111872) 2024-11-13T16:38:02,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742329_1505 (size=111872) 2024-11-13T16:38:02,988 INFO [Socket 
Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0008_000001 (auth:SIMPLE) from 127.0.0.1:43456 2024-11-13T16:38:02,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742330_1506 (size=45609) 2024-11-13T16:38:02,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742330_1506 (size=45609) 2024-11-13T16:38:02,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742330_1506 (size=45609) 2024-11-13T16:38:03,014 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-1_3/usercache/jenkins/appcache/application_1731515545832_0008/container_1731515545832_0008_01_000001/launch_container.sh] 2024-11-13T16:38:03,014 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-1_3/usercache/jenkins/appcache/application_1731515545832_0008/container_1731515545832_0008_01_000001/container_tokens] 2024-11-13T16:38:03,014 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-1_3/usercache/jenkins/appcache/application_1731515545832_0008/container_1731515545832_0008_01_000001/sysfs] 2024-11-13T16:38:03,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742331_1507 (size=136454) 2024-11-13T16:38:03,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742331_1507 (size=136454) 2024-11-13T16:38:03,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742331_1507 (size=136454) 2024-11-13T16:38:03,041 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
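[Editorial note] The repeated BLOCK* addStoredBlock entries in the preceding lines are the NameNode acknowledging the staged job resources (jars, job configuration, split files) block by block, once per replica: each block shows up three times, once for each MiniDFSCluster datanode (127.0.0.1:33001, :46053, :43257), consistent with replication factor 3. A minimal sketch for inspecting the replica placement of such a staged file from client code; the namenode URI and staging path below are illustrative assumptions, not taken from the run:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class BlockLocationSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Hypothetical namenode address and staging path, for illustration only.
            FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37215"), conf);
            Path staged = new Path("/user/jenkins/.staging/job_example/job.jar");
            FileStatus status = fs.getFileStatus(staged);
            // One BlockLocation per block; each lists the datanodes holding a replica,
            // mirroring the three addStoredBlock lines per block in the log above.
            for (BlockLocation loc : fs.getFileBlockLocations(status, 0, status.getLen())) {
                System.out.println(loc.getOffset() + "+" + loc.getLength()
                    + " -> " + String.join(",", loc.getHosts()));
            }
            fs.close();
        }
    }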
2024-11-13T16:38:03,044 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-13T16:38:03,047 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=33.2 K 2024-11-13T16:38:03,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742332_1508 (size=714) 2024-11-13T16:38:03,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742332_1508 (size=714) 2024-11-13T16:38:03,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742332_1508 (size=714) 2024-11-13T16:38:03,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742333_1509 (size=15) 2024-11-13T16:38:03,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742333_1509 (size=15) 2024-11-13T16:38:03,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742333_1509 (size=15) 2024-11-13T16:38:03,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742334_1510 (size=303728) 2024-11-13T16:38:03,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742334_1510 (size=303728) 2024-11-13T16:38:03,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742334_1510 (size=303728) 2024-11-13T16:38:03,458 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T16:38:03,458 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T16:38:03,984 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0009_000001 (auth:SIMPLE) from 127.0.0.1:50000 2024-11-13T16:38:05,910 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
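[Editorial note] The snapshot.ExportSnapshot entries above show the export of 'snaptb0-testExportWithChecksum' loading the snapshot's hfile list and producing a single ~33.2 K split before submitting the MapReduce copy job (the maximum-am-resource-percent warnings are only the capacity scheduler complaining about the tiny test queue). A minimal sketch of driving the same tool programmatically via its Tool interface; the -copy-to destination below is illustrative, standing in for the test's export-test/export-<timestamp> directory:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Copies the snapshot manifest and referenced hfiles to the target filesystem;
            // equivalent to the command-line form
            //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot ... -copy-to ...
            int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
                "-snapshot", "snaptb0-testExportWithChecksum",
                "-copy-to", "hdfs://localhost:37215/user/jenkins/export-test/export-example"
            });
            System.exit(rc);
        }
    }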
2024-11-13T16:38:07,388 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 618c30cdb779cfeb9cea6e5be1e51395, had cached 0 bytes from a total of 5642 2024-11-13T16:38:15,712 DEBUG [master/f8f85b0bb1d8:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 618c30cdb779cfeb9cea6e5be1e51395 changed from -1.0 to 0.0, refreshing cache 2024-11-13T16:38:17,298 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0009_000001 (auth:SIMPLE) from 127.0.0.1:50954 2024-11-13T16:38:17,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742335_1511 (size=349378) 2024-11-13T16:38:17,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742335_1511 (size=349378) 2024-11-13T16:38:17,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742335_1511 (size=349378) 2024-11-13T16:38:19,893 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0009_000001 (auth:SIMPLE) from 127.0.0.1:46034 2024-11-13T16:38:25,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742336_1512 (size=14249) 2024-11-13T16:38:25,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742336_1512 (size=14249) 2024-11-13T16:38:25,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742336_1512 (size=14249) 2024-11-13T16:38:26,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742337_1513 (size=7962) 2024-11-13T16:38:26,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742337_1513 (size=7962) 2024-11-13T16:38:26,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742337_1513 (size=7962) 2024-11-13T16:38:26,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742338_1514 (size=6510) 2024-11-13T16:38:26,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742338_1514 (size=6510) 2024-11-13T16:38:26,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742338_1514 (size=6510) 2024-11-13T16:38:26,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742339_1515 (size=5311) 2024-11-13T16:38:26,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742339_1515 (size=5311) 2024-11-13T16:38:26,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742339_1515 (size=5311) 2024-11-13T16:38:26,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43257 is added to blk_1073742340_1516 (size=17459) 2024-11-13T16:38:26,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742340_1516 (size=17459) 2024-11-13T16:38:26,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742340_1516 (size=17459) 2024-11-13T16:38:26,835 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region e84a8e5fa28b91e9896dcf71c1fcc137, had cached 0 bytes from a total of 6510 2024-11-13T16:38:26,841 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region f3ca86f2b19766b9aaff87f74667e279, had cached 0 bytes from a total of 14249 2024-11-13T16:38:26,927 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-1_3/usercache/jenkins/appcache/application_1731515545832_0009/container_1731515545832_0009_01_000002/launch_container.sh] 2024-11-13T16:38:26,927 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-1_3/usercache/jenkins/appcache/application_1731515545832_0009/container_1731515545832_0009_01_000002/container_tokens] 2024-11-13T16:38:26,927 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-1_3/usercache/jenkins/appcache/application_1731515545832_0009/container_1731515545832_0009_01_000002/sysfs] 2024-11-13T16:38:27,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742341_1517 (size=462) 2024-11-13T16:38:27,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742341_1517 (size=462) 2024-11-13T16:38:27,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742341_1517 (size=462) 2024-11-13T16:38:27,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742342_1518 (size=17459) 2024-11-13T16:38:27,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742342_1518 (size=17459) 2024-11-13T16:38:27,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742342_1518 (size=17459) 2024-11-13T16:38:27,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742343_1519 (size=349378) 2024-11-13T16:38:27,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742343_1519 (size=349378) 2024-11-13T16:38:27,221 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742343_1519 (size=349378) 2024-11-13T16:38:27,238 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0009_000001 (auth:SIMPLE) from 127.0.0.1:35382 2024-11-13T16:38:28,976 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-13T16:38:28,983 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-13T16:38:29,035 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportWithChecksum 2024-11-13T16:38:29,035 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-13T16:38:29,041 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-13T16:38:29,041 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-315877880_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-13T16:38:29,045 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-13T16:38:29,045 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-13T16:38:29,046 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-315877880_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515878039/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515878039/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-13T16:38:29,051 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515878039/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-13T16:38:29,051 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515878039/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-13T16:38:29,102 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportWithChecksum 2024-11-13T16:38:29,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=189, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-11-13T16:38:29,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=189 2024-11-13T16:38:29,131 DEBUG [PEWorker-2 {}] 
hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515909131"}]},"ts":"1731515909131"} 2024-11-13T16:38:29,145 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-11-13T16:38:29,145 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-11-13T16:38:29,147 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-11-13T16:38:29,151 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=191, ppid=190, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=e84a8e5fa28b91e9896dcf71c1fcc137, UNASSIGN}, {pid=192, ppid=190, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=f3ca86f2b19766b9aaff87f74667e279, UNASSIGN}] 2024-11-13T16:38:29,153 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=192, ppid=190, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=f3ca86f2b19766b9aaff87f74667e279, UNASSIGN 2024-11-13T16:38:29,154 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=191, ppid=190, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=e84a8e5fa28b91e9896dcf71c1fcc137, UNASSIGN 2024-11-13T16:38:29,156 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=e84a8e5fa28b91e9896dcf71c1fcc137, regionState=CLOSING, regionLocation=f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:38:29,156 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=192 updating hbase:meta row=f3ca86f2b19766b9aaff87f74667e279, regionState=CLOSING, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:38:29,165 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=191, ppid=190, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=e84a8e5fa28b91e9896dcf71c1fcc137, UNASSIGN because future has completed 2024-11-13T16:38:29,165 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45809 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=f8f85b0bb1d8,38523,1731515529292, table=testtb-testExportWithChecksum, region=e84a8e5fa28b91e9896dcf71c1fcc137. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
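[Editorial note] From the HMaster$13 "disable testtb-testExportWithChecksum" entry onward, the log traces the procedure tree the master builds for a disable: DisableTableProcedure (pid=189) marks the table DISABLING in hbase:meta and spawns CloseTableRegionsProcedure (pid=190), which in turn spawns one TransitRegionStateProcedure per region (pids 191 and 192) to UNASSIGN them, while the client simply polls "Checking to see if procedure is done". A minimal sketch of the client-side call that kicks this off, assuming a reachable test cluster configuration; the sketch class name is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("testtb-testExportWithChecksum");
                // Blocks until the master-side DisableTableProcedure (and its
                // CloseTableRegionsProcedure / TransitRegionStateProcedure children) completes.
                admin.disableTable(table);
                System.out.println("disabled=" + admin.isTableDisabled(table));
            }
        }
    }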
2024-11-13T16:38:29,169 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T16:38:29,169 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=193, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure e84a8e5fa28b91e9896dcf71c1fcc137, server=f8f85b0bb1d8,38523,1731515529292}] 2024-11-13T16:38:29,172 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=192, ppid=190, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=f3ca86f2b19766b9aaff87f74667e279, UNASSIGN because future has completed 2024-11-13T16:38:29,177 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T16:38:29,177 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=194, ppid=192, state=RUNNABLE, hasLock=false; CloseRegionProcedure f3ca86f2b19766b9aaff87f74667e279, server=f8f85b0bb1d8,38437,1731515528789}] 2024-11-13T16:38:29,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=189 2024-11-13T16:38:29,334 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(122): Close e84a8e5fa28b91e9896dcf71c1fcc137 2024-11-13T16:38:29,334 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T16:38:29,335 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1722): Closing e84a8e5fa28b91e9896dcf71c1fcc137, disabling compactions & flushes 2024-11-13T16:38:29,335 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137. 2024-11-13T16:38:29,335 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137. 2024-11-13T16:38:29,335 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137. after waiting 0 ms 2024-11-13T16:38:29,335 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137. 
2024-11-13T16:38:29,347 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] handler.UnassignRegionHandler(122): Close f3ca86f2b19766b9aaff87f74667e279 2024-11-13T16:38:29,347 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T16:38:29,347 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(1722): Closing f3ca86f2b19766b9aaff87f74667e279, disabling compactions & flushes 2024-11-13T16:38:29,347 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279. 2024-11-13T16:38:29,347 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279. 2024-11-13T16:38:29,347 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279. after waiting 0 ms 2024-11-13T16:38:29,347 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279. 2024-11-13T16:38:29,416 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T16:38:29,418 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:38:29,419 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279. 
2024-11-13T16:38:29,419 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(1676): Region close journal for f3ca86f2b19766b9aaff87f74667e279: Waiting for close lock at 1731515909347Running coprocessor pre-close hooks at 1731515909347Disabling compacts and flushes for region at 1731515909347Disabling writes for close at 1731515909347Writing region close event to WAL at 1731515909370 (+23 ms)Running coprocessor post-close hooks at 1731515909418 (+48 ms)Closed at 1731515909419 (+1 ms) 2024-11-13T16:38:29,426 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/e84a8e5fa28b91e9896dcf71c1fcc137/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T16:38:29,429 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] handler.UnassignRegionHandler(157): Closed f3ca86f2b19766b9aaff87f74667e279 2024-11-13T16:38:29,429 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:38:29,430 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137. 2024-11-13T16:38:29,430 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1676): Region close journal for e84a8e5fa28b91e9896dcf71c1fcc137: Waiting for close lock at 1731515909334Running coprocessor pre-close hooks at 1731515909334Disabling compacts and flushes for region at 1731515909335 (+1 ms)Disabling writes for close at 1731515909335Writing region close event to WAL at 1731515909368 (+33 ms)Running coprocessor post-close hooks at 1731515909429 (+61 ms)Closed at 1731515909430 (+1 ms) 2024-11-13T16:38:29,431 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=192 updating hbase:meta row=f3ca86f2b19766b9aaff87f74667e279, regionState=CLOSED 2024-11-13T16:38:29,435 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=194, ppid=192, state=RUNNABLE, hasLock=false; CloseRegionProcedure f3ca86f2b19766b9aaff87f74667e279, server=f8f85b0bb1d8,38437,1731515528789 because future has completed 2024-11-13T16:38:29,437 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(157): Closed e84a8e5fa28b91e9896dcf71c1fcc137 2024-11-13T16:38:29,438 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=e84a8e5fa28b91e9896dcf71c1fcc137, regionState=CLOSED 2024-11-13T16:38:29,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=189 2024-11-13T16:38:29,449 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=193, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure e84a8e5fa28b91e9896dcf71c1fcc137, server=f8f85b0bb1d8,38523,1731515529292 because future has completed 2024-11-13T16:38:29,450 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure 
pid=194, resume processing ppid=192 2024-11-13T16:38:29,450 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, ppid=192, state=SUCCESS, hasLock=false; CloseRegionProcedure f3ca86f2b19766b9aaff87f74667e279, server=f8f85b0bb1d8,38437,1731515528789 in 266 msec 2024-11-13T16:38:29,464 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, ppid=190, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=f3ca86f2b19766b9aaff87f74667e279, UNASSIGN in 300 msec 2024-11-13T16:38:29,475 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=193, resume processing ppid=191 2024-11-13T16:38:29,475 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, ppid=191, state=SUCCESS, hasLock=false; CloseRegionProcedure e84a8e5fa28b91e9896dcf71c1fcc137, server=f8f85b0bb1d8,38523,1731515529292 in 287 msec 2024-11-13T16:38:29,495 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=191, resume processing ppid=190 2024-11-13T16:38:29,495 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, ppid=190, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=e84a8e5fa28b91e9896dcf71c1fcc137, UNASSIGN in 324 msec 2024-11-13T16:38:29,509 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=190, resume processing ppid=189 2024-11-13T16:38:29,509 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=189, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 350 msec 2024-11-13T16:38:29,514 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515909513"}]},"ts":"1731515909513"} 2024-11-13T16:38:29,518 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-11-13T16:38:29,518 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-11-13T16:38:29,537 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 422 msec 2024-11-13T16:38:29,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=189 2024-11-13T16:38:29,758 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-13T16:38:29,759 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportWithChecksum 2024-11-13T16:38:29,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=195, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-13T16:38:29,761 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=195, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-13T16:38:29,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 
{}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-11-13T16:38:29,771 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=195, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-13T16:38:29,772 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38523 {}] ipc.CallRunner(138): callId: 350 service: ClientService methodName: Mutate size: 159 connection: 172.17.0.3:54699 deadline: 1731515969764, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=f8f85b0bb1d8 port=38437 startCode=1731515528789. As of locationSeqNum=24. 2024-11-13T16:38:29,775 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2 , the old value is region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=f8f85b0bb1d8 port=38437 startCode=1731515528789. As of locationSeqNum=24. 2024-11-13T16:38:29,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=f8f85b0bb1d8 port=38437 startCode=1731515528789. As of locationSeqNum=24. 2024-11-13T16:38:29,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(84): Try updating region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38523,1731515529292, seqNum=2 with the new location region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38437,1731515528789, seqNum=24 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=f8f85b0bb1d8 port=38437 startCode=1731515528789. As of locationSeqNum=24. 
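[Editorial note] The HMaster$5 "delete testtb-testExportWithChecksum" entry starts DeleteTableProcedure (pid=195): the master removes the table's ACL entries (PermissionStorage), archives the region directories, deletes the rows from hbase:meta and finally drops the table descriptor; the RegionMovedException chatter around it is just the async client refreshing its cached location for the hbase:acl region after it moved between region servers. A minimal sketch of the corresponding client call, assuming the table has already been disabled as in the previous sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DeleteTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("testtb-testExportWithChecksum");
                // deleteTable requires the table to be disabled; the call returns once the
                // master-side DeleteTableProcedure has finished (ACLs removed, regions
                // archived, meta rows and descriptor deleted).
                admin.deleteTable(table);
            }
        }
    }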
2024-11-13T16:38:29,783 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/e84a8e5fa28b91e9896dcf71c1fcc137 2024-11-13T16:38:29,785 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279 2024-11-13T16:38:29,792 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279/cf, FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279/recovered.edits] 2024-11-13T16:38:29,793 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/e84a8e5fa28b91e9896dcf71c1fcc137/cf, FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/e84a8e5fa28b91e9896dcf71c1fcc137/recovered.edits] 2024-11-13T16:38:29,800 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279/cf/732fda776387496585976f5255d4009c to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279/cf/732fda776387496585976f5255d4009c 2024-11-13T16:38:29,808 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279/recovered.edits/9.seqid to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279/recovered.edits/9.seqid 2024-11-13T16:38:29,809 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/f3ca86f2b19766b9aaff87f74667e279 2024-11-13T16:38:29,847 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/e84a8e5fa28b91e9896dcf71c1fcc137/cf/a09772b4645e4febbb8fc74773a2fcbb to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportWithChecksum/e84a8e5fa28b91e9896dcf71c1fcc137/cf/a09772b4645e4febbb8fc74773a2fcbb 2024-11-13T16:38:29,886 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/e84a8e5fa28b91e9896dcf71c1fcc137/recovered.edits/9.seqid to 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportWithChecksum/e84a8e5fa28b91e9896dcf71c1fcc137/recovered.edits/9.seqid 2024-11-13T16:38:29,888 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:38:29,894 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportWithChecksum/e84a8e5fa28b91e9896dcf71c1fcc137 2024-11-13T16:38:29,895 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-11-13T16:38:29,896 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41677, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:38:29,899 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-11-13T16:38:29,905 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf] 2024-11-13T16:38:29,906 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38437 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:38:29,906 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38437 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:38:29,907 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38437 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:38:29,909 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48211, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-11-13T16:38:29,911 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38437,1731515528789, seqNum=28] 2024-11-13T16:38:29,912 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:38:29,915 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59085, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-11-13T16:38:29,931 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38437 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-11-13T16:38:29,956 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b202411139c876eb682ab421e85e4fa4caafbd99c_f3ca86f2b19766b9aaff87f74667e279 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b202411139c876eb682ab421e85e4fa4caafbd99c_f3ca86f2b19766b9aaff87f74667e279 2024-11-13T16:38:29,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-13T16:38:29,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-13T16:38:29,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-13T16:38:29,961 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-13T16:38:29,961 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-13T16:38:29,961 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-13T16:38:29,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-13T16:38:29,966 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-13T16:38:29,971 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024111368f5de6d2505447cacd9ded661cbf915_e84a8e5fa28b91e9896dcf71c1fcc137 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024111368f5de6d2505447cacd9ded661cbf915_e84a8e5fa28b91e9896dcf71c1fcc137 2024-11-13T16:38:29,975 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-11-13T16:38:29,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/acl/testtb-testExportWithChecksum 2024-11-13T16:38:29,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-13T16:38:29,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-13T16:38:29,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:38:29,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:38:29,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:38:29,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-13T16:38:29,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:38:29,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-11-13T16:38:29,979 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:38:29,980 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:38:29,980 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:38:29,980 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:38:29,987 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=195, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-13T16:38:29,995 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting 
some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-11-13T16:38:29,998 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6d9281e5cd6679c218f14227797e5659, had cached 0 bytes from a total of 5490 2024-11-13T16:38:29,999 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 33bde9964e83af90386f782c94c4e709, had cached 0 bytes from a total of 15057 2024-11-13T16:38:30,011 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithChecksum' descriptor. 2024-11-13T16:38:30,023 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=195, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-13T16:38:30,023 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states. 2024-11-13T16:38:30,023 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731515910023"}]},"ts":"9223372036854775807"} 2024-11-13T16:38:30,023 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731515910023"}]},"ts":"9223372036854775807"} 2024-11-13T16:38:30,033 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-13T16:38:30,033 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => e84a8e5fa28b91e9896dcf71c1fcc137, NAME => 'testtb-testExportWithChecksum,,1731515816310.e84a8e5fa28b91e9896dcf71c1fcc137.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => f3ca86f2b19766b9aaff87f74667e279, NAME => 'testtb-testExportWithChecksum,1,1731515816310.f3ca86f2b19766b9aaff87f74667e279.', STARTKEY => '1', ENDKEY => ''}] 2024-11-13T16:38:30,033 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted. 
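[Editor's note] The DeleteTableProcedure entries around here (pid=195) show the master archiving HFiles, clearing the /hbase/acl znode, and removing the region rows for testtb-testExportWithChecksum from hbase:meta. The test helper that actually issued the request is not visible in this log; the following is only a minimal sketch of the public Admin call that triggers the same procedure, assuming the standard HBase 2.x+ client API (the class name DropTableExample is illustrative):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Table name taken from the surrounding log; the procedure id (195) is assigned by the master.
      TableName tn = TableName.valueOf("testtb-testExportWithChecksum");
      if (admin.tableExists(tn)) {
        if (admin.isTableEnabled(tn)) {
          admin.disableTable(tn);   // regions must be offline before DeleteTableProcedure can run
        }
        admin.deleteTable(tn);      // drives the HFile archiving and hbase:meta cleanup seen above
      }
    }
  }
}
```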
2024-11-13T16:38:30,033 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731515910033"}]},"ts":"9223372036854775807"} 2024-11-13T16:38:30,041 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-11-13T16:38:30,046 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=195, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-13T16:38:30,054 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 287 msec 2024-11-13T16:38:30,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-11-13T16:38:30,088 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-11-13T16:38:30,089 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-13T16:38:30,104 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-11-13T16:38:30,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-11-13T16:38:30,107 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 2024-11-13T16:38:30,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-11-13T16:38:30,152 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=792 (was 802), OpenFileDescriptor=806 (was 805) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1549 (was 1549), ProcessCount=17 (was 14) - ProcessCount LEAK? 
-, AvailableMemoryMB=2760 (was 3964) 2024-11-13T16:38:30,152 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=792 is superior to 500 2024-11-13T16:38:30,175 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=792, OpenFileDescriptor=806, MaxFileDescriptor=1048576, SystemLoadAverage=1549, ProcessCount=17, AvailableMemoryMB=2758 2024-11-13T16:38:30,176 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=792 is superior to 500 2024-11-13T16:38:30,180 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T16:38:30,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=196, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:38:30,190 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=196, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T16:38:30,196 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=196, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T16:38:30,199 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 196 2024-11-13T16:38:30,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=196 2024-11-13T16:38:30,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=196 2024-11-13T16:38:30,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742344_1520 (size=454) 2024-11-13T16:38:30,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742344_1520 (size=454) 2024-11-13T16:38:30,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742344_1520 (size=454) 2024-11-13T16:38:30,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=196 2024-11-13T16:38:30,727 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4e7556eebd23cb4a2700bea3af60234b, NAME => 
'testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:38:30,736 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 9639880e5004ddd3885ba2a5ed6a3767, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:38:30,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=196 2024-11-13T16:38:30,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742345_1521 (size=79) 2024-11-13T16:38:30,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742345_1521 (size=79) 2024-11-13T16:38:30,844 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:38:30,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742345_1521 (size=79) 2024-11-13T16:38:30,844 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing 4e7556eebd23cb4a2700bea3af60234b, disabling compactions & flushes 2024-11-13T16:38:30,844 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b. 2024-11-13T16:38:30,844 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b. 
2024-11-13T16:38:30,844 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b. after waiting 0 ms 2024-11-13T16:38:30,844 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b. 2024-11-13T16:38:30,844 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b. 2024-11-13T16:38:30,844 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4e7556eebd23cb4a2700bea3af60234b: Waiting for close lock at 1731515910844Disabling compacts and flushes for region at 1731515910844Disabling writes for close at 1731515910844Writing region close event to WAL at 1731515910844Closed at 1731515910844 2024-11-13T16:38:30,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742346_1522 (size=79) 2024-11-13T16:38:30,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742346_1522 (size=79) 2024-11-13T16:38:30,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742346_1522 (size=79) 2024-11-13T16:38:30,871 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:38:30,871 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing 9639880e5004ddd3885ba2a5ed6a3767, disabling compactions & flushes 2024-11-13T16:38:30,871 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767. 2024-11-13T16:38:30,871 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767. 2024-11-13T16:38:30,871 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767. after waiting 0 ms 2024-11-13T16:38:30,871 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767. 2024-11-13T16:38:30,871 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767. 
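[Editor's note] The CreateTableProcedure entries here (pid=196) write the filesystem layout for two regions of a MOB-enabled table with a single 'cf' family and split key '1'. A hedged sketch of the equivalent client-side request, assuming the standard TableDescriptorBuilder / ColumnFamilyDescriptorBuilder API rather than the test utility that actually issued it; attribute values mirror the descriptor printed in the log:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableExample {
  static void createTable(Admin admin) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    // MOB enabled with threshold 0, one version, ROW bloom filter, as in the logged descriptor.
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setMobEnabled(true)
        .setMobThreshold(0L)
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .build();
    TableDescriptor td = TableDescriptorBuilder.newBuilder(tn)
        // mirrors METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'} from the log
        .setValue("hbase.store.file-tracker.impl", "DEFAULT")
        .setColumnFamily(cf)
        .build();
    // One split key ("1") yields the two regions seen above: ['', '1') and ['1', '').
    admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
  }
}
```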
2024-11-13T16:38:30,871 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for 9639880e5004ddd3885ba2a5ed6a3767: Waiting for close lock at 1731515910871Disabling compacts and flushes for region at 1731515910871Disabling writes for close at 1731515910871Writing region close event to WAL at 1731515910871Closed at 1731515910871 2024-11-13T16:38:30,887 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=196, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T16:38:30,888 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1731515910887"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515910887"}]},"ts":"1731515910887"} 2024-11-13T16:38:30,888 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1731515910887"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731515910887"}]},"ts":"1731515910887"} 2024-11-13T16:38:30,899 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-13T16:38:30,907 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=196, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T16:38:30,908 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515910907"}]},"ts":"1731515910907"} 2024-11-13T16:38:30,917 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-11-13T16:38:30,918 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {f8f85b0bb1d8=0} racks are {/default-rack=0} 2024-11-13T16:38:30,923 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-13T16:38:30,923 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-13T16:38:30,923 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-13T16:38:30,923 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-13T16:38:30,923 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-13T16:38:30,923 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-13T16:38:30,923 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-13T16:38:30,923 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-13T16:38:30,923 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-13T16:38:30,923 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-13T16:38:30,923 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=197, 
ppid=196, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4e7556eebd23cb4a2700bea3af60234b, ASSIGN}, {pid=198, ppid=196, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=9639880e5004ddd3885ba2a5ed6a3767, ASSIGN}] 2024-11-13T16:38:30,926 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=198, ppid=196, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=9639880e5004ddd3885ba2a5ed6a3767, ASSIGN 2024-11-13T16:38:30,926 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=197, ppid=196, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4e7556eebd23cb4a2700bea3af60234b, ASSIGN 2024-11-13T16:38:30,927 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=198, ppid=196, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=9639880e5004ddd3885ba2a5ed6a3767, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,39619,1731515529105; forceNewPlan=false, retain=false 2024-11-13T16:38:30,928 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=197, ppid=196, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4e7556eebd23cb4a2700bea3af60234b, ASSIGN; state=OFFLINE, location=f8f85b0bb1d8,38437,1731515528789; forceNewPlan=false, retain=false 2024-11-13T16:38:31,083 INFO [f8f85b0bb1d8:45809 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-13T16:38:31,083 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=198 updating hbase:meta row=9639880e5004ddd3885ba2a5ed6a3767, regionState=OPENING, regionLocation=f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:38:31,085 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=4e7556eebd23cb4a2700bea3af60234b, regionState=OPENING, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:38:31,098 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=198, ppid=196, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=9639880e5004ddd3885ba2a5ed6a3767, ASSIGN because future has completed 2024-11-13T16:38:31,104 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=197, ppid=196, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4e7556eebd23cb4a2700bea3af60234b, ASSIGN because future has completed 2024-11-13T16:38:31,107 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=198, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9639880e5004ddd3885ba2a5ed6a3767, server=f8f85b0bb1d8,39619,1731515529105}] 2024-11-13T16:38:31,119 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=200, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4e7556eebd23cb4a2700bea3af60234b, server=f8f85b0bb1d8,38437,1731515528789}] 2024-11-13T16:38:31,284 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39305, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T16:38:31,306 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=200}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b. 2024-11-13T16:38:31,307 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=200}] regionserver.HRegion(7752): Opening region: {ENCODED => 4e7556eebd23cb4a2700bea3af60234b, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b.', STARTKEY => '', ENDKEY => '1'} 2024-11-13T16:38:31,307 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=200}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b. service=AccessControlService 2024-11-13T16:38:31,307 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=200}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-13T16:38:31,308 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=200}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 4e7556eebd23cb4a2700bea3af60234b 2024-11-13T16:38:31,308 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=200}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:38:31,308 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=200}] regionserver.HRegion(7794): checking encryption for 4e7556eebd23cb4a2700bea3af60234b 2024-11-13T16:38:31,308 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=200}] regionserver.HRegion(7797): checking classloading for 4e7556eebd23cb4a2700bea3af60234b 2024-11-13T16:38:31,319 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767. 2024-11-13T16:38:31,319 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7752): Opening region: {ENCODED => 9639880e5004ddd3885ba2a5ed6a3767, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767.', STARTKEY => '1', ENDKEY => ''} 2024-11-13T16:38:31,319 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767. service=AccessControlService 2024-11-13T16:38:31,319 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-13T16:38:31,320 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 9639880e5004ddd3885ba2a5ed6a3767 2024-11-13T16:38:31,320 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T16:38:31,320 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7794): checking encryption for 9639880e5004ddd3885ba2a5ed6a3767 2024-11-13T16:38:31,320 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7797): checking classloading for 9639880e5004ddd3885ba2a5ed6a3767 2024-11-13T16:38:31,323 INFO [StoreOpener-4e7556eebd23cb4a2700bea3af60234b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4e7556eebd23cb4a2700bea3af60234b 2024-11-13T16:38:31,328 INFO [StoreOpener-9639880e5004ddd3885ba2a5ed6a3767-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9639880e5004ddd3885ba2a5ed6a3767 2024-11-13T16:38:31,332 INFO [StoreOpener-4e7556eebd23cb4a2700bea3af60234b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4e7556eebd23cb4a2700bea3af60234b columnFamilyName cf 2024-11-13T16:38:31,334 DEBUG [StoreOpener-4e7556eebd23cb4a2700bea3af60234b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:38:31,337 INFO [StoreOpener-9639880e5004ddd3885ba2a5ed6a3767-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9639880e5004ddd3885ba2a5ed6a3767 columnFamilyName cf 2024-11-13T16:38:31,338 INFO [StoreOpener-4e7556eebd23cb4a2700bea3af60234b-1 {}] regionserver.HStore(327): Store=4e7556eebd23cb4a2700bea3af60234b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:38:31,339 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=200}] regionserver.HRegion(1038): replaying wal for 4e7556eebd23cb4a2700bea3af60234b 2024-11-13T16:38:31,339 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=200}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e7556eebd23cb4a2700bea3af60234b 2024-11-13T16:38:31,340 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=200}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e7556eebd23cb4a2700bea3af60234b 2024-11-13T16:38:31,342 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=200}] regionserver.HRegion(1048): stopping wal replay for 4e7556eebd23cb4a2700bea3af60234b 2024-11-13T16:38:31,342 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=200}] regionserver.HRegion(1060): Cleaning up temporary data for 4e7556eebd23cb4a2700bea3af60234b 2024-11-13T16:38:31,343 DEBUG [StoreOpener-9639880e5004ddd3885ba2a5ed6a3767-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:38:31,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=196 2024-11-13T16:38:31,348 INFO [StoreOpener-9639880e5004ddd3885ba2a5ed6a3767-1 {}] regionserver.HStore(327): Store=9639880e5004ddd3885ba2a5ed6a3767/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T16:38:31,349 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1038): replaying wal for 9639880e5004ddd3885ba2a5ed6a3767 2024-11-13T16:38:31,350 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/9639880e5004ddd3885ba2a5ed6a3767 2024-11-13T16:38:31,354 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/9639880e5004ddd3885ba2a5ed6a3767 2024-11-13T16:38:31,356 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=200}] regionserver.HRegion(1093): writing seq id for 4e7556eebd23cb4a2700bea3af60234b 
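[Editor's note] The CompactionConfiguration lines above dump the effective per-store compaction settings (min/max compact size, file counts, ratios, major period and jitter). These are derived from standard hbase-site.xml keys; the snippet below is only an illustrative sketch of overriding a few of them programmatically, assuming the usual property names, with values copied from the log dump rather than recommended settings:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigExample {
  static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // minFilesToCompact / maxFilesToCompact in the logged CompactionConfiguration
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // selection ratios: 1.2 normal, 5.0 off-peak in the log
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    // major compaction period (ms) and jitter: 604800000 / 0.5 in the log
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
    return conf;
  }
}
```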
2024-11-13T16:38:31,357 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1048): stopping wal replay for 9639880e5004ddd3885ba2a5ed6a3767 2024-11-13T16:38:31,357 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1060): Cleaning up temporary data for 9639880e5004ddd3885ba2a5ed6a3767 2024-11-13T16:38:31,365 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1093): writing seq id for 9639880e5004ddd3885ba2a5ed6a3767 2024-11-13T16:38:31,367 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=200}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e7556eebd23cb4a2700bea3af60234b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:38:31,368 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=200}] regionserver.HRegion(1114): Opened 4e7556eebd23cb4a2700bea3af60234b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60651737, jitterRate=-0.09621869027614594}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:38:31,368 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=200}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4e7556eebd23cb4a2700bea3af60234b 2024-11-13T16:38:31,369 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=200}] regionserver.HRegion(1006): Region open journal for 4e7556eebd23cb4a2700bea3af60234b: Running coprocessor pre-open hook at 1731515911308Writing region info on filesystem at 1731515911308Initializing all the Stores at 1731515911319 (+11 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515911319Cleaning up temporary data from old regions at 1731515911342 (+23 ms)Running coprocessor post-open hooks at 1731515911368 (+26 ms)Region opened successfully at 1731515911369 (+1 ms) 2024-11-13T16:38:31,377 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=200}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b., pid=200, masterSystemTime=1731515911288 2024-11-13T16:38:31,383 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/9639880e5004ddd3885ba2a5ed6a3767/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T16:38:31,384 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1114): Opened 9639880e5004ddd3885ba2a5ed6a3767; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74717128, jitterRate=0.11337196826934814}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T16:38:31,384 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9639880e5004ddd3885ba2a5ed6a3767 2024-11-13T16:38:31,384 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1006): Region open journal for 9639880e5004ddd3885ba2a5ed6a3767: Running coprocessor pre-open hook at 1731515911320Writing region info on filesystem at 1731515911320Initializing all the Stores at 1731515911327 (+7 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731515911327Cleaning up temporary data from old regions at 1731515911357 (+30 ms)Running coprocessor post-open hooks at 1731515911384 (+27 ms)Region opened successfully at 1731515911384 2024-11-13T16:38:31,391 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=200}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b. 2024-11-13T16:38:31,392 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=200}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b. 2024-11-13T16:38:31,395 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767., pid=199, masterSystemTime=1731515911283 2024-11-13T16:38:31,401 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=4e7556eebd23cb4a2700bea3af60234b, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:38:31,402 DEBUG [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767. 2024-11-13T16:38:31,402 INFO [RS_OPEN_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767. 
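[Editor's note] Both regions are now open (pid=199/200), one per region server named in the assignment entries. A small sketch of how a client could confirm the resulting placement, assuming the standard RegionLocator API; the printed output shown in the comment is illustrative:

```java
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionPlacementExample {
  static void printPlacement(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (RegionLocator locator = conn.getRegionLocator(tn)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Prints the encoded region name and its hosting server, e.g.
        // 4e7556eebd23cb4a2700bea3af60234b -> f8f85b0bb1d8,38437,1731515528789
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}
```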
2024-11-13T16:38:31,404 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=198 updating hbase:meta row=9639880e5004ddd3885ba2a5ed6a3767, regionState=OPEN, openSeqNum=2, regionLocation=f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:38:31,408 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=200, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4e7556eebd23cb4a2700bea3af60234b, server=f8f85b0bb1d8,38437,1731515528789 because future has completed 2024-11-13T16:38:31,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=199, ppid=198, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9639880e5004ddd3885ba2a5ed6a3767, server=f8f85b0bb1d8,39619,1731515529105 because future has completed 2024-11-13T16:38:31,423 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45809 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=f8f85b0bb1d8,38437,1731515528789, table=testtb-testExportFileSystemStateWithSkipTmp, region=4e7556eebd23cb4a2700bea3af60234b. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-13T16:38:31,428 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=200, resume processing ppid=197 2024-11-13T16:38:31,428 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, ppid=197, state=SUCCESS, hasLock=false; OpenRegionProcedure 4e7556eebd23cb4a2700bea3af60234b, server=f8f85b0bb1d8,38437,1731515528789 in 305 msec 2024-11-13T16:38:31,434 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, ppid=196, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4e7556eebd23cb4a2700bea3af60234b, ASSIGN in 505 msec 2024-11-13T16:38:31,456 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=199, resume processing ppid=198 2024-11-13T16:38:31,456 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=198, state=SUCCESS, hasLock=false; OpenRegionProcedure 9639880e5004ddd3885ba2a5ed6a3767, server=f8f85b0bb1d8,39619,1731515529105 in 321 msec 2024-11-13T16:38:31,470 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=198, resume processing ppid=196 2024-11-13T16:38:31,470 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, ppid=196, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=9639880e5004ddd3885ba2a5ed6a3767, ASSIGN in 533 msec 2024-11-13T16:38:31,476 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=196, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T16:38:31,476 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515911476"}]},"ts":"1731515911476"} 2024-11-13T16:38:31,486 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-11-13T16:38:31,495 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=196, 
state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T16:38:31,495 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-11-13T16:38:31,505 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38437 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-13T16:38:31,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:38:31,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:38:31,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:38:31,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:38:31,521 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:38:31,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:38:31,532 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-13T16:38:31,532 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:38:31,532 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-13T16:38:31,533 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:38:31,533 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data 
PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-13T16:38:31,534 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 1.3520 sec 2024-11-13T16:38:31,535 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:38:31,535 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-13T16:38:32,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=196 2024-11-13T16:38:32,360 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-13T16:38:32,360 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-13T16:38:32,366 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:38:32,366 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b. 2024-11-13T16:38:32,366 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T16:38:32,372 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-13T16:38:32,382 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-13T16:38:32,400 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-13T16:38:32,408 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-13T16:38:32,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731515912409 (current time:1731515912409). 
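[Editor's note] The master has just received a snapshot request { ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp ... type=FLUSH ttl=0 }. A minimal sketch of the client call that produces such a request, assuming the Admin snapshot API with an explicit SnapshotType (the test's own wrapper code is not shown in this log):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotExample {
  static void takeSnapshot(Admin admin) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    // FLUSH snapshots flush memstores before snapshotting, matching type=FLUSH in the log.
    admin.snapshot(new SnapshotDescription(
        "emptySnaptb0-testExportFileSystemStateWithSkipTmp", tn, SnapshotType.FLUSH));
  }
}
```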
2024-11-13T16:38:32,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T16:38:32,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-13T16:38:32,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T16:38:32,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e7a0885, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:38:32,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:38:32,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:38:32,420 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:38:32,423 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:38:32,423 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:38:32,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c4d2aed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:38:32,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:38:32,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:38:32,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:38:32,430 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55392, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:38:32,433 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a965221, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:38:32,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:38:32,437 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:38:32,437 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:38:32,439 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45902, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:38:32,441 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809. 2024-11-13T16:38:32,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:38:32,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:38:32,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:38:32,445 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-13T16:38:32,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67bbc11e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:38:32,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:38:32,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:38:32,448 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:38:32,449 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:38:32,449 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:38:32,449 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d3d9b6d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:38:32,449 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:38:32,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:38:32,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:38:32,451 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55416, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:38:32,452 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49c9b071, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:38:32,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:38:32,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:38:32,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:38:32,456 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45914, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-13T16:38:32,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38437,1731515528789, seqNum=28] 2024-11-13T16:38:32,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:38:32,464 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47230, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:38:32,469 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809. 2024-11-13T16:38:32,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor282.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:38:32,469 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:38:32,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:38:32,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-13T16:38:32,470 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:38:32,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-13T16:38:32,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=201, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-13T16:38:32,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 201 2024-11-13T16:38:32,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-13T16:38:32,497 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T16:38:32,502 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T16:38:32,519 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T16:38:32,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742347_1523 (size=203) 2024-11-13T16:38:32,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742347_1523 (size=203) 2024-11-13T16:38:32,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742347_1523 (size=203) 2024-11-13T16:38:32,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-13T16:38:32,608 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=201, 
state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T16:38:32,608 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=202, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4e7556eebd23cb4a2700bea3af60234b}, {pid=203, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9639880e5004ddd3885ba2a5ed6a3767}] 2024-11-13T16:38:32,609 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=203, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9639880e5004ddd3885ba2a5ed6a3767 2024-11-13T16:38:32,610 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4e7556eebd23cb4a2700bea3af60234b 2024-11-13T16:38:32,763 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=203 2024-11-13T16:38:32,764 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767. 2024-11-13T16:38:32,764 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegion(2603): Flush status journal for 9639880e5004ddd3885ba2a5ed6a3767: 2024-11-13T16:38:32,764 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-13T16:38:32,764 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38437 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=202 2024-11-13T16:38:32,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T16:38:32,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b. 
2024-11-13T16:38:32,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:38:32,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T16:38:32,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2603): Flush status journal for 4e7556eebd23cb4a2700bea3af60234b: 2024-11-13T16:38:32,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-13T16:38:32,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T16:38:32,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:38:32,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-13T16:38:32,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-13T16:38:32,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742348_1524 (size=82) 2024-11-13T16:38:32,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742348_1524 (size=82) 2024-11-13T16:38:32,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742348_1524 (size=82) 2024-11-13T16:38:32,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b. 
2024-11-13T16:38:32,863 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=202 2024-11-13T16:38:32,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=202 2024-11-13T16:38:32,864 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 4e7556eebd23cb4a2700bea3af60234b 2024-11-13T16:38:32,864 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4e7556eebd23cb4a2700bea3af60234b 2024-11-13T16:38:32,871 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=201, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4e7556eebd23cb4a2700bea3af60234b in 259 msec 2024-11-13T16:38:32,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742349_1525 (size=82) 2024-11-13T16:38:32,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742349_1525 (size=82) 2024-11-13T16:38:32,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742349_1525 (size=82) 2024-11-13T16:38:32,894 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767. 
2024-11-13T16:38:32,894 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=203 2024-11-13T16:38:32,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=203 2024-11-13T16:38:32,907 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 9639880e5004ddd3885ba2a5ed6a3767 2024-11-13T16:38:32,907 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=203, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9639880e5004ddd3885ba2a5ed6a3767 2024-11-13T16:38:32,928 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=203, resume processing ppid=201 2024-11-13T16:38:32,928 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, ppid=201, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 9639880e5004ddd3885ba2a5ed6a3767 in 304 msec 2024-11-13T16:38:32,929 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T16:38:32,930 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T16:38:32,935 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-13T16:38:32,935 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T16:38:32,935 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:38:32,942 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-13T16:38:32,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742350_1526 (size=74) 2024-11-13T16:38:33,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742350_1526 (size=74) 2024-11-13T16:38:33,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742350_1526 (size=74) 2024-11-13T16:38:33,012 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T16:38:33,013 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T16:38:33,016 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T16:38:33,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742351_1527 (size=697) 2024-11-13T16:38:33,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742351_1527 (size=697) 2024-11-13T16:38:33,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742351_1527 (size=697) 2024-11-13T16:38:33,119 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T16:38:33,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-13T16:38:33,167 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T16:38:33,168 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T16:38:33,179 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T16:38:33,179 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 201 2024-11-13T16:38:33,191 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 701 msec 2024-11-13T16:38:33,471 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0009_000001 (auth:SIMPLE) from 127.0.0.1:35392 2024-11-13T16:38:33,526 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-1_1/usercache/jenkins/appcache/application_1731515545832_0009/container_1731515545832_0009_01_000001/launch_container.sh] 2024-11-13T16:38:33,526 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-1_1/usercache/jenkins/appcache/application_1731515545832_0009/container_1731515545832_0009_01_000001/container_tokens] 2024-11-13T16:38:33,526 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-1_1/usercache/jenkins/appcache/application_1731515545832_0009/container_1731515545832_0009_01_000001/sysfs] 2024-11-13T16:38:33,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-13T16:38:33,628 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-13T16:38:33,638 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38437 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b. with WAL disabled. Data may be lost in the event of a crash. 
2024-11-13T16:38:33,640 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39619 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767. with WAL disabled. Data may be lost in the event of a crash. 2024-11-13T16:38:33,658 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-13T16:38:33,664 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:38:33,664 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b. 2024-11-13T16:38:33,665 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T16:38:33,668 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-13T16:38:33,682 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-13T16:38:33,692 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-13T16:38:33,698 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-13T16:38:33,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731515913698 (current time:1731515913698). 
2024-11-13T16:38:33,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-13T16:38:33,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-13T16:38:33,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-13T16:38:33,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@93febff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:38:33,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:38:33,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:38:33,702 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:38:33,702 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:38:33,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:38:33,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@502f82c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:38:33,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:38:33,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:38:33,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:38:33,704 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55430, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:38:33,705 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6912a752, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:38:33,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:38:33,706 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:38:33,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:38:33,708 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45918, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:38:33,710 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809. 2024-11-13T16:38:33,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:38:33,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:38:33,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:38:33,710 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-13T16:38:33,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14d101f9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:38:33,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ClusterIdFetcher(90): Going to request f8f85b0bb1d8,45809,-1 for getting cluster id 2024-11-13T16:38:33,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T16:38:33,717 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '189d46b3-8410-4d36-80e0-1d6ce58cf995' 2024-11-13T16:38:33,717 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T16:38:33,717 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "189d46b3-8410-4d36-80e0-1d6ce58cf995" 2024-11-13T16:38:33,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c14d547, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:38:33,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [f8f85b0bb1d8,45809,-1] 2024-11-13T16:38:33,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T16:38:33,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:38:33,723 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55454, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T16:38:33,727 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b2ece39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T16:38:33,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T16:38:33,735 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=f8f85b0bb1d8,39619,1731515529105, seqNum=-1] 2024-11-13T16:38:33,735 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:38:33,739 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45934, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-13T16:38:33,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., hostname=f8f85b0bb1d8,38437,1731515528789, seqNum=28] 2024-11-13T16:38:33,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T16:38:33,747 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47240, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T16:38:33,751 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809. 2024-11-13T16:38:33,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor282.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T16:38:33,751 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:38:33,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:38:33,752 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:38:33,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-13T16:38:33,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-13T16:38:33,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=204, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=204, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-13T16:38:33,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 204 2024-11-13T16:38:33,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-13T16:38:33,776 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=204, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=204, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-13T16:38:33,783 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=204, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=204, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-13T16:38:33,797 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=204, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=204, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-13T16:38:33,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742352_1528 (size=198) 2024-11-13T16:38:33,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742352_1528 (size=198) 2024-11-13T16:38:33,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742352_1528 (size=198) 2024-11-13T16:38:33,823 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=204, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=204, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp 
table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-13T16:38:33,823 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=205, ppid=204, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4e7556eebd23cb4a2700bea3af60234b}, {pid=206, ppid=204, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9639880e5004ddd3885ba2a5ed6a3767}] 2024-11-13T16:38:33,825 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=206, ppid=204, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9639880e5004ddd3885ba2a5ed6a3767 2024-11-13T16:38:33,825 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=204, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4e7556eebd23cb4a2700bea3af60234b 2024-11-13T16:38:33,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-13T16:38:33,978 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39619 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=206 2024-11-13T16:38:33,978 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=206}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767. 2024-11-13T16:38:33,979 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=206}] regionserver.HRegion(2902): Flushing 9639880e5004ddd3885ba2a5ed6a3767 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-13T16:38:33,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38437 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-11-13T16:38:33,983 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b. 
2024-11-13T16:38:33,983 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2902): Flushing 4e7556eebd23cb4a2700bea3af60234b 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-13T16:38:34,017 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=206}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241113706dab08c00a48e3818e001033a8c894_9639880e5004ddd3885ba2a5ed6a3767 is 71, key is 217c4f00887f7f644ba7ab401b01224a/cf:q/1731515913640/Put/seqid=0 2024-11-13T16:38:34,040 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111322d4e523b82b49d8bce01cdebf45671b_4e7556eebd23cb4a2700bea3af60234b is 71, key is 018f5951a9f96b1eec6c956289ea8e49/cf:q/1731515913638/Put/seqid=0 2024-11-13T16:38:34,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742353_1529 (size=8242) 2024-11-13T16:38:34,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742353_1529 (size=8242) 2024-11-13T16:38:34,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742353_1529 (size=8242) 2024-11-13T16:38:34,045 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=206}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:38:34,054 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=206}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241113706dab08c00a48e3818e001033a8c894_9639880e5004ddd3885ba2a5ed6a3767 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241113706dab08c00a48e3818e001033a8c894_9639880e5004ddd3885ba2a5ed6a3767 2024-11-13T16:38:34,055 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=206}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/9639880e5004ddd3885ba2a5ed6a3767/.tmp/cf/f13c4a22ad1a414baad11ebb526b04e3, store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=9639880e5004ddd3885ba2a5ed6a3767] 2024-11-13T16:38:34,056 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=206}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/9639880e5004ddd3885ba2a5ed6a3767/.tmp/cf/f13c4a22ad1a414baad11ebb526b04e3 is 220, key is 172cf1eaa8975445847b466349818cbb4/cf:q/1731515913640/Put/seqid=0 2024-11-13T16:38:34,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742354_1530 (size=5032) 2024-11-13T16:38:34,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742354_1530 (size=5032) 2024-11-13T16:38:34,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-13T16:38:34,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742354_1530 (size=5032) 2024-11-13T16:38:34,098 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:38:34,124 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111322d4e523b82b49d8bce01cdebf45671b_4e7556eebd23cb4a2700bea3af60234b to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e2024111322d4e523b82b49d8bce01cdebf45671b_4e7556eebd23cb4a2700bea3af60234b 2024-11-13T16:38:34,131 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e7556eebd23cb4a2700bea3af60234b/.tmp/cf/da93b3c0b1364bf49b743387a738d799, store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=4e7556eebd23cb4a2700bea3af60234b] 2024-11-13T16:38:34,132 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e7556eebd23cb4a2700bea3af60234b/.tmp/cf/da93b3c0b1364bf49b743387a738d799 is 220, key is 024fe6e7eacda37c18d4c4a87eb2f56be/cf:q/1731515913638/Put/seqid=0 2024-11-13T16:38:34,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742355_1531 (size=15743) 2024-11-13T16:38:34,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742355_1531 (size=15743) 2024-11-13T16:38:34,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742355_1531 (size=15743) 2024-11-13T16:38:34,163 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=206}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/9639880e5004ddd3885ba2a5ed6a3767/.tmp/cf/f13c4a22ad1a414baad11ebb526b04e3 2024-11-13T16:38:34,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742356_1532 (size=5742) 2024-11-13T16:38:34,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742356_1532 (size=5742) 2024-11-13T16:38:34,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742356_1532 (size=5742) 2024-11-13T16:38:34,187 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e7556eebd23cb4a2700bea3af60234b/.tmp/cf/da93b3c0b1364bf49b743387a738d799 2024-11-13T16:38:34,198 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e7556eebd23cb4a2700bea3af60234b/.tmp/cf/da93b3c0b1364bf49b743387a738d799 as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e7556eebd23cb4a2700bea3af60234b/cf/da93b3c0b1364bf49b743387a738d799 2024-11-13T16:38:34,204 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=206}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/9639880e5004ddd3885ba2a5ed6a3767/.tmp/cf/f13c4a22ad1a414baad11ebb526b04e3 as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/9639880e5004ddd3885ba2a5ed6a3767/cf/f13c4a22ad1a414baad11ebb526b04e3 2024-11-13T16:38:34,212 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=206}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/9639880e5004ddd3885ba2a5ed6a3767/cf/f13c4a22ad1a414baad11ebb526b04e3, entries=48, sequenceid=6, filesize=15.4 K 2024-11-13T16:38:34,215 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=206}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 9639880e5004ddd3885ba2a5ed6a3767 in 236ms, sequenceid=6, compaction requested=false 2024-11-13T16:38:34,215 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=206}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 
'testtb-testExportFileSystemStateWithSkipTmp' 2024-11-13T16:38:34,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=206}] regionserver.HRegion(2603): Flush status journal for 9639880e5004ddd3885ba2a5ed6a3767: 2024-11-13T16:38:34,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=206}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-13T16:38:34,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=206}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T16:38:34,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=206}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:38:34,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=206}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/9639880e5004ddd3885ba2a5ed6a3767/cf/f13c4a22ad1a414baad11ebb526b04e3] hfiles 2024-11-13T16:38:34,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=206}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/9639880e5004ddd3885ba2a5ed6a3767/cf/f13c4a22ad1a414baad11ebb526b04e3 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T16:38:34,222 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e7556eebd23cb4a2700bea3af60234b/cf/da93b3c0b1364bf49b743387a738d799, entries=2, sequenceid=6, filesize=5.6 K 2024-11-13T16:38:34,231 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 4e7556eebd23cb4a2700bea3af60234b in 247ms, sequenceid=6, compaction requested=false 2024-11-13T16:38:34,231 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2603): Flush status journal for 4e7556eebd23cb4a2700bea3af60234b: 2024-11-13T16:38:34,231 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-13T16:38:34,231 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T16:38:34,231 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-13T16:38:34,231 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e7556eebd23cb4a2700bea3af60234b/cf/da93b3c0b1364bf49b743387a738d799] hfiles 2024-11-13T16:38:34,231 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e7556eebd23cb4a2700bea3af60234b/cf/da93b3c0b1364bf49b743387a738d799 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T16:38:34,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742357_1533 (size=121) 2024-11-13T16:38:34,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742357_1533 (size=121) 2024-11-13T16:38:34,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742357_1533 (size=121) 2024-11-13T16:38:34,287 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=206}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767. 
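
The mobdir/.tmp hfiles, the HMobStore "FLUSH Renaming flushed file" entries and the DefaultMobStoreFlusher lines above come from a MOB-enabled column family on the test table: cells larger than the configured MOB threshold are written under the table's mobdir area (the e9516f04c7d2974b21addb190acc6c0a path above) instead of into the per-region store files, which is why each flush produces both a regular store file and a MOB file. A minimal sketch of declaring such a family is below; the threshold value and the createTable call are illustrative only and are not taken from this log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableSketch {
  public static void main(String[] args) throws Exception {
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("cf"))
        .setMobEnabled(true)       // values above the threshold are stored as MOB files (under mobdir)
        .setMobThreshold(102400L)  // illustrative threshold; the test's actual value is not shown in this log
        .build();
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))
          .setColumnFamily(cf)
          .build());
    }
  }
}
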
2024-11-13T16:38:34,287 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=206}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=206 2024-11-13T16:38:34,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=206 2024-11-13T16:38:34,288 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 9639880e5004ddd3885ba2a5ed6a3767 2024-11-13T16:38:34,288 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=206, ppid=204, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9639880e5004ddd3885ba2a5ed6a3767 2024-11-13T16:38:34,308 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, ppid=204, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 9639880e5004ddd3885ba2a5ed6a3767 in 476 msec 2024-11-13T16:38:34,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742358_1534 (size=121) 2024-11-13T16:38:34,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742358_1534 (size=121) 2024-11-13T16:38:34,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742358_1534 (size=121) 2024-11-13T16:38:34,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-13T16:38:34,718 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b. 
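
The two SnapshotRegionProcedure subprocedures above (pid=205 and pid=206, both children of ppid=204) that flush and reference each region run entirely on the master and region servers; from a client they correspond to one snapshot request. A minimal client-side sketch of issuing such a FLUSH-type snapshot through the Java Admin API, reusing the table and snapshot names from this log and assuming nothing else about the test harness:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Default snapshot type is FLUSH: each online region is flushed first,
      // then its store files are referenced in the snapshot manifest.
      admin.snapshot("snaptb0-testExportFileSystemStateWithSkipTmp",
          TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
    }
  }
}

The call returns only after the master-side snapshot procedure has completed.
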
2024-11-13T16:38:34,718 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/f8f85b0bb1d8:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-11-13T16:38:34,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster(4169): Remote procedure done, pid=205 2024-11-13T16:38:34,719 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 4e7556eebd23cb4a2700bea3af60234b 2024-11-13T16:38:34,719 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=204, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4e7556eebd23cb4a2700bea3af60234b 2024-11-13T16:38:34,722 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=205, resume processing ppid=204 2024-11-13T16:38:34,722 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=204, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4e7556eebd23cb4a2700bea3af60234b in 897 msec 2024-11-13T16:38:34,722 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=204, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=204, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-13T16:38:34,725 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=204, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=204, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-13T16:38:34,726 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
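
Once the SnapshotProcedure (pid=204) logged a few entries below reaches state=SUCCESS, the completed snapshot is visible to clients. A small sketch of listing it, again purely a client-side illustration rather than anything the test itself does:

import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Completed snapshots (e.g. snaptb0-testExportFileSystemStateWithSkipTmp) show up here.
      List<SnapshotDescription> snapshots = admin.listSnapshots();
      for (SnapshotDescription sd : snapshots) {
        System.out.println(sd.getName() + " table=" + sd.getTableName());
      }
    }
  }
}
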
2024-11-13T16:38:34,726 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-13T16:38:34,726 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T16:38:34,728 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241113706dab08c00a48e3818e001033a8c894_9639880e5004ddd3885ba2a5ed6a3767, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e2024111322d4e523b82b49d8bce01cdebf45671b_4e7556eebd23cb4a2700bea3af60234b] hfiles 2024-11-13T16:38:34,728 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241113706dab08c00a48e3818e001033a8c894_9639880e5004ddd3885ba2a5ed6a3767 2024-11-13T16:38:34,728 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e2024111322d4e523b82b49d8bce01cdebf45671b_4e7556eebd23cb4a2700bea3af60234b 2024-11-13T16:38:34,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742359_1535 (size=305) 2024-11-13T16:38:34,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742359_1535 (size=305) 2024-11-13T16:38:34,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742359_1535 (size=305) 2024-11-13T16:38:34,759 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=204, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=204, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-13T16:38:34,759 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T16:38:34,760 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T16:38:34,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742360_1536 (size=1007) 2024-11-13T16:38:34,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added 
to blk_1073742360_1536 (size=1007) 2024-11-13T16:38:34,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742360_1536 (size=1007) 2024-11-13T16:38:34,824 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=204, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=204, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-13T16:38:34,830 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=204, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=204, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-13T16:38:34,831 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T16:38:34,832 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=204, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=204, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-13T16:38:34,833 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 204 2024-11-13T16:38:34,835 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=204, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 1.0790 sec 2024-11-13T16:38:34,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-13T16:38:34,909 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-13T16:38:34,909 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515914909 2024-11-13T16:38:34,909 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:37215, tgtDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515914909, rawTgtDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515914909, srcFsUri=hdfs://localhost:37215, srcDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:38:34,950 DEBUG [Time-limited test {}] 
snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:37215, inputRoot=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce 2024-11-13T16:38:34,951 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-315877880_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515914909, skipTmp=true, initialOutputSnapshotDir=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515914909/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T16:38:34,958 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-13T16:38:34,968 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T16:38:34,975 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515914909/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T16:38:35,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742361_1537 (size=198) 2024-11-13T16:38:35,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742361_1537 (size=198) 2024-11-13T16:38:35,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742361_1537 (size=198) 2024-11-13T16:38:35,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742362_1538 (size=1007) 2024-11-13T16:38:35,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742362_1538 (size=1007) 2024-11-13T16:38:35,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742362_1538 (size=1007) 2024-11-13T16:38:35,456 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:38:35,456 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:38:35,457 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:38:35,910 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 
3.3.4, See HBASE-27595 for details. 2024-11-13T16:38:36,674 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop-8700236609051455807.jar 2024-11-13T16:38:36,674 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:38:36,675 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:38:36,750 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop-9538798224142933754.jar 2024-11-13T16:38:36,751 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:38:36,751 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:38:36,751 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:38:36,752 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:38:36,752 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:38:36,752 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-13T16:38:36,753 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-13T16:38:36,753 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-13T16:38:36,754 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-13T16:38:36,754 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-13T16:38:36,755 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-13T16:38:36,755 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-13T16:38:36,756 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-13T16:38:36,756 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-13T16:38:36,756 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-13T16:38:36,757 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-13T16:38:36,757 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-13T16:38:36,758 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:38:36,758 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:38:36,758 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:38:36,759 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:38:36,759 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-13T16:38:36,759 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:38:36,760 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-13T16:38:36,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742363_1539 (size=131440) 2024-11-13T16:38:36,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742363_1539 (size=131440) 2024-11-13T16:38:36,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742363_1539 (size=131440) 2024-11-13T16:38:36,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742364_1540 (size=4188619) 2024-11-13T16:38:36,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742364_1540 (size=4188619) 2024-11-13T16:38:36,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742364_1540 (size=4188619) 2024-11-13T16:38:36,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742365_1541 (size=1323991) 2024-11-13T16:38:36,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742365_1541 (size=1323991) 2024-11-13T16:38:36,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742365_1541 (size=1323991) 2024-11-13T16:38:36,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742366_1542 (size=903731) 2024-11-13T16:38:36,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742366_1542 (size=903731) 2024-11-13T16:38:36,933 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742366_1542 (size=903731) 2024-11-13T16:38:36,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742367_1543 (size=8360083) 2024-11-13T16:38:36,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742367_1543 (size=8360083) 2024-11-13T16:38:36,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742367_1543 (size=8360083) 2024-11-13T16:38:36,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742368_1544 (size=1877034) 2024-11-13T16:38:36,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742368_1544 (size=1877034) 2024-11-13T16:38:36,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742368_1544 (size=1877034) 2024-11-13T16:38:37,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742369_1545 (size=77835) 2024-11-13T16:38:37,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742369_1545 (size=77835) 2024-11-13T16:38:37,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742369_1545 (size=77835) 2024-11-13T16:38:37,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742370_1546 (size=30949) 2024-11-13T16:38:37,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742370_1546 (size=30949) 2024-11-13T16:38:37,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742370_1546 (size=30949) 2024-11-13T16:38:37,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742371_1547 (size=1597327) 2024-11-13T16:38:37,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742371_1547 (size=1597327) 2024-11-13T16:38:37,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742371_1547 (size=1597327) 2024-11-13T16:38:37,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742372_1548 (size=4695811) 2024-11-13T16:38:37,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742372_1548 (size=4695811) 2024-11-13T16:38:37,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742372_1548 (size=4695811) 2024-11-13T16:38:37,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742373_1549 (size=6424544) 2024-11-13T16:38:37,109 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742373_1549 (size=6424544) 2024-11-13T16:38:37,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742373_1549 (size=6424544) 2024-11-13T16:38:37,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742374_1550 (size=232957) 2024-11-13T16:38:37,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742374_1550 (size=232957) 2024-11-13T16:38:37,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742374_1550 (size=232957) 2024-11-13T16:38:37,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742375_1551 (size=440392) 2024-11-13T16:38:37,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742375_1551 (size=440392) 2024-11-13T16:38:37,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742375_1551 (size=440392) 2024-11-13T16:38:37,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742376_1552 (size=127628) 2024-11-13T16:38:37,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742376_1552 (size=127628) 2024-11-13T16:38:37,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742376_1552 (size=127628) 2024-11-13T16:38:37,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742377_1553 (size=20406) 2024-11-13T16:38:37,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742377_1553 (size=20406) 2024-11-13T16:38:37,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742377_1553 (size=20406) 2024-11-13T16:38:37,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:38:37,713 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-11-13T16:38:37,714 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-13T16:38:37,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742378_1554 (size=5175431) 2024-11-13T16:38:37,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742378_1554 (size=5175431) 2024-11-13T16:38:37,799 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742378_1554 (size=5175431) 2024-11-13T16:38:37,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742379_1555 (size=217634) 2024-11-13T16:38:37,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742379_1555 (size=217634) 2024-11-13T16:38:37,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742379_1555 (size=217634) 2024-11-13T16:38:37,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742380_1556 (size=1832290) 2024-11-13T16:38:37,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742380_1556 (size=1832290) 2024-11-13T16:38:37,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742380_1556 (size=1832290) 2024-11-13T16:38:37,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742381_1557 (size=322274) 2024-11-13T16:38:37,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742381_1557 (size=322274) 2024-11-13T16:38:37,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742381_1557 (size=322274) 2024-11-13T16:38:37,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742382_1558 (size=503880) 2024-11-13T16:38:37,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742382_1558 (size=503880) 2024-11-13T16:38:37,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742382_1558 (size=503880) 2024-11-13T16:38:37,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742383_1559 (size=29229) 2024-11-13T16:38:37,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742383_1559 (size=29229) 2024-11-13T16:38:37,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742383_1559 (size=29229) 2024-11-13T16:38:38,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742384_1560 (size=24096) 2024-11-13T16:38:38,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742384_1560 (size=24096) 2024-11-13T16:38:38,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742384_1560 (size=24096) 2024-11-13T16:38:38,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742385_1561 (size=111872) 2024-11-13T16:38:38,594 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742385_1561 (size=111872) 2024-11-13T16:38:38,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742385_1561 (size=111872) 2024-11-13T16:38:38,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742386_1562 (size=45609) 2024-11-13T16:38:38,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742386_1562 (size=45609) 2024-11-13T16:38:38,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742386_1562 (size=45609) 2024-11-13T16:38:38,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742387_1563 (size=136454) 2024-11-13T16:38:38,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742387_1563 (size=136454) 2024-11-13T16:38:38,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742387_1563 (size=136454) 2024-11-13T16:38:38,785 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-13T16:38:38,790 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-11-13T16:38:38,796 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=33.9 K 2024-11-13T16:38:38,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742388_1564 (size=770) 2024-11-13T16:38:38,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742388_1564 (size=770) 2024-11-13T16:38:38,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742388_1564 (size=770) 2024-11-13T16:38:39,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742389_1565 (size=15) 2024-11-13T16:38:39,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742389_1565 (size=15) 2024-11-13T16:38:39,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742389_1565 (size=15) 2024-11-13T16:38:39,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742390_1566 (size=303898) 2024-11-13T16:38:39,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742390_1566 (size=303898) 2024-11-13T16:38:39,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742390_1566 (size=303898) 2024-11-13T16:38:39,211 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is 
insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T16:38:39,211 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-13T16:38:39,465 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0010_000001 (auth:SIMPLE) from 127.0.0.1:43782 2024-11-13T16:38:43,219 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T16:38:49,905 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0010_000001 (auth:SIMPLE) from 127.0.0.1:58068 2024-11-13T16:38:50,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742391_1567 (size=349572) 2024-11-13T16:38:50,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742391_1567 (size=349572) 2024-11-13T16:38:50,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742391_1567 (size=349572) 2024-11-13T16:38:52,358 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0010_000001 (auth:SIMPLE) from 127.0.0.1:55698 2024-11-13T16:38:52,388 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 618c30cdb779cfeb9cea6e5be1e51395, had cached 0 bytes from a total of 5642 2024-11-13T16:38:56,082 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
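
The ExportSnapshot run set up earlier in the log (skipTmp=true, manifest copied to export-test/export-1731515914909, then a MapReduce job submitted to copy the hfiles) is normally driven as a Hadoop Tool. A rough sketch of an equivalent invocation follows; it assumes ExportSnapshot can be run through ToolRunner as a standard Tool and that the skipTmp=true behaviour seen above maps to the snapshot.export.skip.tmp property, which is an assumption rather than something shown in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumed property for the skipTmp=true behaviour: write the exported snapshot
    // directly into the target .hbase-snapshot directory instead of a .tmp staging dir.
    conf.setBoolean("snapshot.export.skip.tmp", true);
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
        "-copy-to", "hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515914909"
    });
    System.exit(rc);
  }
}

Outside of tests, the same export is more commonly launched from the command line as: hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <snapshot name> -copy-to <hdfs URI>.
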
2024-11-13T16:38:56,082 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=68.10 KB heapSize=108.73 KB 2024-11-13T16:38:56,240 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/.tmp/info/4efb0d27a0d24ca5b741348a4ee35c35 is 209, key is testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767./info:regioninfo/1731515911404/Put/seqid=0 2024-11-13T16:38:56,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742392_1568 (size=17729) 2024-11-13T16:38:56,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742392_1568 (size=17729) 2024-11-13T16:38:56,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742392_1568 (size=17729) 2024-11-13T16:38:56,340 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.47 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/.tmp/info/4efb0d27a0d24ca5b741348a4ee35c35 2024-11-13T16:38:56,423 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/.tmp/ns/2646067b2e974ac39952eed8da14dec0 is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745265.5b7d129670ad8b90d0358e1bad77535d./ns:/1731515770208/DeleteFamily/seqid=0 2024-11-13T16:38:56,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742393_1569 (size=7506) 2024-11-13T16:38:56,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742393_1569 (size=7506) 2024-11-13T16:38:56,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742393_1569 (size=7506) 2024-11-13T16:38:56,487 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.01 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/.tmp/ns/2646067b2e974ac39952eed8da14dec0 2024-11-13T16:38:56,541 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/.tmp/rep_barrier/98f1f5eb385b4cc1a22d72ccbe987eb9 is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745265.5b7d129670ad8b90d0358e1bad77535d./rep_barrier:/1731515770208/DeleteFamily/seqid=0 2024-11-13T16:38:56,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742394_1570 (size=7746) 2024-11-13T16:38:56,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742394_1570 (size=7746) 2024-11-13T16:38:56,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742394_1570 (size=7746) 2024-11-13T16:38:56,560 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.10 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/.tmp/rep_barrier/98f1f5eb385b4cc1a22d72ccbe987eb9 2024-11-13T16:38:56,599 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/.tmp/table/198e5138b3884b9a93cbd5070ef8eb46 is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1731515745265.5b7d129670ad8b90d0358e1bad77535d./table:/1731515770208/DeleteFamily/seqid=0 2024-11-13T16:38:56,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742395_1571 (size=8590) 2024-11-13T16:38:56,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742395_1571 (size=8590) 2024-11-13T16:38:56,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742395_1571 (size=8590) 2024-11-13T16:38:56,643 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.52 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/.tmp/table/198e5138b3884b9a93cbd5070ef8eb46 2024-11-13T16:38:56,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/.tmp/info/4efb0d27a0d24ca5b741348a4ee35c35 as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/info/4efb0d27a0d24ca5b741348a4ee35c35 2024-11-13T16:38:56,709 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/info/4efb0d27a0d24ca5b741348a4ee35c35, entries=95, sequenceid=194, filesize=17.3 K 2024-11-13T16:38:56,718 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/.tmp/ns/2646067b2e974ac39952eed8da14dec0 as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/ns/2646067b2e974ac39952eed8da14dec0 2024-11-13T16:38:56,733 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/ns/2646067b2e974ac39952eed8da14dec0, entries=21, sequenceid=194, filesize=7.3 K 2024-11-13T16:38:56,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/.tmp/rep_barrier/98f1f5eb385b4cc1a22d72ccbe987eb9 as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/rep_barrier/98f1f5eb385b4cc1a22d72ccbe987eb9 2024-11-13T16:38:56,742 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/rep_barrier/98f1f5eb385b4cc1a22d72ccbe987eb9, entries=19, sequenceid=194, filesize=7.6 K 2024-11-13T16:38:56,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/.tmp/table/198e5138b3884b9a93cbd5070ef8eb46 as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/table/198e5138b3884b9a93cbd5070ef8eb46 2024-11-13T16:38:56,755 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/table/198e5138b3884b9a93cbd5070ef8eb46, entries=35, sequenceid=194, filesize=8.4 K 2024-11-13T16:38:56,758 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~68.10 KB/69732, heapSize ~108.67 KB/111280, currentSize=0 B/0 for 1588230740 in 675ms, sequenceid=194, compaction requested=false 2024-11-13T16:38:56,758 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-13T16:38:58,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742396_1572 (size=15743) 2024-11-13T16:38:58,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742396_1572 (size=15743) 2024-11-13T16:38:58,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742396_1572 (size=15743) 2024-11-13T16:38:58,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742397_1573 (size=8242) 2024-11-13T16:38:58,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742397_1573 (size=8242) 2024-11-13T16:38:58,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742397_1573 (size=8242) 2024-11-13T16:38:58,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742398_1574 (size=5742) 2024-11-13T16:38:58,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742398_1574 (size=5742) 2024-11-13T16:38:58,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742398_1574 (size=5742) 2024-11-13T16:38:58,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742399_1575 (size=5032) 2024-11-13T16:38:58,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742399_1575 (size=5032) 2024-11-13T16:38:58,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742399_1575 (size=5032) 2024-11-13T16:38:58,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 
is added to blk_1073742400_1576 (size=17473) 2024-11-13T16:38:58,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742400_1576 (size=17473) 2024-11-13T16:38:58,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742400_1576 (size=17473) 2024-11-13T16:38:58,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742401_1577 (size=476) 2024-11-13T16:38:58,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742401_1577 (size=476) 2024-11-13T16:38:58,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742401_1577 (size=476) 2024-11-13T16:38:58,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742402_1578 (size=17473) 2024-11-13T16:38:58,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742402_1578 (size=17473) 2024-11-13T16:38:58,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742402_1578 (size=17473) 2024-11-13T16:38:58,950 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_3/usercache/jenkins/appcache/application_1731515545832_0010/container_1731515545832_0010_01_000002/launch_container.sh] 2024-11-13T16:38:58,951 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_3/usercache/jenkins/appcache/application_1731515545832_0010/container_1731515545832_0010_01_000002/container_tokens] 2024-11-13T16:38:58,951 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-0_3/usercache/jenkins/appcache/application_1731515545832_0010/container_1731515545832_0010_01_000002/sysfs] 2024-11-13T16:38:58,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742403_1579 (size=349572) 2024-11-13T16:38:58,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742403_1579 (size=349572) 2024-11-13T16:38:58,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742403_1579 (size=349572) 2024-11-13T16:38:59,011 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731515545832_0010_000001 (auth:SIMPLE) from 127.0.0.1:48282 2024-11-13T16:38:59,984 DEBUG [HBase-Metrics2-1 {}] 
regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 41571 2024-11-13T16:39:00,531 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-13T16:39:00,531 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-13T16:39:00,562 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:00,563 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-13T16:39:00,563 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-13T16:39:00,563 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-315877880_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:00,564 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-11-13T16:39:00,564 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-11-13T16:39:00,564 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-315877880_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515914909/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515914909/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:00,571 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515914909/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-11-13T16:39:00,571 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/export-test/export-1731515914909/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-11-13T16:39:00,587 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:00,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=207, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:00,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=207 2024-11-13T16:39:00,594 DEBUG [PEWorker-4 {}] 
hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515940594"}]},"ts":"1731515940594"} 2024-11-13T16:39:00,597 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-11-13T16:39:00,597 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-11-13T16:39:00,599 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=208, ppid=207, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-11-13T16:39:00,603 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=209, ppid=208, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4e7556eebd23cb4a2700bea3af60234b, UNASSIGN}, {pid=210, ppid=208, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=9639880e5004ddd3885ba2a5ed6a3767, UNASSIGN}] 2024-11-13T16:39:00,604 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=210, ppid=208, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=9639880e5004ddd3885ba2a5ed6a3767, UNASSIGN 2024-11-13T16:39:00,604 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=209, ppid=208, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4e7556eebd23cb4a2700bea3af60234b, UNASSIGN 2024-11-13T16:39:00,605 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=4e7556eebd23cb4a2700bea3af60234b, regionState=CLOSING, regionLocation=f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:39:00,608 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45809 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=f8f85b0bb1d8,38437,1731515528789, table=testtb-testExportFileSystemStateWithSkipTmp, region=4e7556eebd23cb4a2700bea3af60234b. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
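The entries above show the master handling a client "disable" request for testtb-testExportFileSystemStateWithSkipTmp: DisableTableProcedure pid=207 marks the table DISABLING, then spawns a CloseTableRegionsProcedure and one TransitRegionStateProcedure per region, moving both regions to CLOSING. For orientation only, here is a minimal client-side sketch of the call that triggers this flow, assuming an already-open Connection named conn; the table name is taken from the log, everything else is illustrative rather than the test's actual code.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public final class DisableTableSketch {
  // Issues the same "disable" request the log shows arriving at the master
  // (HMaster: Client=jenkins//172.17.0.3 disable testtb-testExportFileSystemStateWithSkipTmp).
  static void disableTestTable(Connection conn) throws IOException {
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Admin admin = conn.getAdmin()) {
      admin.disableTable(tn);            // synchronous: returns after the DisableTableProcedure finishes
      assert admin.isTableDisabled(tn);  // by then both regions have been unassigned, as in the log
    }
  }
}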
2024-11-13T16:39:00,610 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=210 updating hbase:meta row=9639880e5004ddd3885ba2a5ed6a3767, regionState=CLOSING, regionLocation=f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:39:00,611 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, ppid=208, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4e7556eebd23cb4a2700bea3af60234b, UNASSIGN because future has completed 2024-11-13T16:39:00,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=210, ppid=208, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=9639880e5004ddd3885ba2a5ed6a3767, UNASSIGN because future has completed 2024-11-13T16:39:00,614 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T16:39:00,615 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-13T16:39:00,615 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=212, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4e7556eebd23cb4a2700bea3af60234b, server=f8f85b0bb1d8,38437,1731515528789}] 2024-11-13T16:39:00,615 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=211, ppid=210, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9639880e5004ddd3885ba2a5ed6a3767, server=f8f85b0bb1d8,39619,1731515529105}] 2024-11-13T16:39:00,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=207 2024-11-13T16:39:00,774 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(122): Close 9639880e5004ddd3885ba2a5ed6a3767 2024-11-13T16:39:00,774 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T16:39:00,775 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1722): Closing 9639880e5004ddd3885ba2a5ed6a3767, disabling compactions & flushes 2024-11-13T16:39:00,775 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767. 2024-11-13T16:39:00,775 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767. 
2024-11-13T16:39:00,775 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=212}] handler.UnassignRegionHandler(122): Close 4e7556eebd23cb4a2700bea3af60234b 2024-11-13T16:39:00,775 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767. after waiting 0 ms 2024-11-13T16:39:00,775 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767. 2024-11-13T16:39:00,775 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=212}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-13T16:39:00,775 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=212}] regionserver.HRegion(1722): Closing 4e7556eebd23cb4a2700bea3af60234b, disabling compactions & flushes 2024-11-13T16:39:00,775 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=212}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b. 2024-11-13T16:39:00,775 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=212}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b. 2024-11-13T16:39:00,775 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=212}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b. after waiting 0 ms 2024-11-13T16:39:00,775 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=212}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b. 2024-11-13T16:39:00,797 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/9639880e5004ddd3885ba2a5ed6a3767/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T16:39:00,797 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:39:00,797 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767. 
2024-11-13T16:39:00,797 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1676): Region close journal for 9639880e5004ddd3885ba2a5ed6a3767: Waiting for close lock at 1731515940775Running coprocessor pre-close hooks at 1731515940775Disabling compacts and flushes for region at 1731515940775Disabling writes for close at 1731515940775Writing region close event to WAL at 1731515940791 (+16 ms)Running coprocessor post-close hooks at 1731515940797 (+6 ms)Closed at 1731515940797 2024-11-13T16:39:00,807 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=212}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e7556eebd23cb4a2700bea3af60234b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T16:39:00,807 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(157): Closed 9639880e5004ddd3885ba2a5ed6a3767 2024-11-13T16:39:00,808 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=210 updating hbase:meta row=9639880e5004ddd3885ba2a5ed6a3767, regionState=CLOSED 2024-11-13T16:39:00,809 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=212}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:39:00,809 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=212}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b. 2024-11-13T16:39:00,809 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=212}] regionserver.HRegion(1676): Region close journal for 4e7556eebd23cb4a2700bea3af60234b: Waiting for close lock at 1731515940775Running coprocessor pre-close hooks at 1731515940775Disabling compacts and flushes for region at 1731515940775Disabling writes for close at 1731515940775Writing region close event to WAL at 1731515940794 (+19 ms)Running coprocessor post-close hooks at 1731515940809 (+15 ms)Closed at 1731515940809 2024-11-13T16:39:00,811 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION, pid=212}] handler.UnassignRegionHandler(157): Closed 4e7556eebd23cb4a2700bea3af60234b 2024-11-13T16:39:00,812 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=4e7556eebd23cb4a2700bea3af60234b, regionState=CLOSED 2024-11-13T16:39:00,813 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=211, ppid=210, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9639880e5004ddd3885ba2a5ed6a3767, server=f8f85b0bb1d8,39619,1731515529105 because future has completed 2024-11-13T16:39:00,815 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=212, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4e7556eebd23cb4a2700bea3af60234b, server=f8f85b0bb1d8,38437,1731515528789 because future has completed 2024-11-13T16:39:00,817 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=211, resume processing ppid=210 2024-11-13T16:39:00,817 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=211, ppid=210, state=SUCCESS, 
hasLock=false; CloseRegionProcedure 9639880e5004ddd3885ba2a5ed6a3767, server=f8f85b0bb1d8,39619,1731515529105 in 199 msec 2024-11-13T16:39:00,820 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=212, resume processing ppid=209 2024-11-13T16:39:00,820 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, ppid=208, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=9639880e5004ddd3885ba2a5ed6a3767, UNASSIGN in 214 msec 2024-11-13T16:39:00,820 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=212, ppid=209, state=SUCCESS, hasLock=false; CloseRegionProcedure 4e7556eebd23cb4a2700bea3af60234b, server=f8f85b0bb1d8,38437,1731515528789 in 201 msec 2024-11-13T16:39:00,822 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=209, resume processing ppid=208 2024-11-13T16:39:00,822 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, ppid=208, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4e7556eebd23cb4a2700bea3af60234b, UNASSIGN in 217 msec 2024-11-13T16:39:00,827 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=208, resume processing ppid=207 2024-11-13T16:39:00,827 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=207, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 224 msec 2024-11-13T16:39:00,836 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731515940835"}]},"ts":"1731515940835"} 2024-11-13T16:39:00,849 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-11-13T16:39:00,850 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-11-13T16:39:00,853 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=207, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 264 msec 2024-11-13T16:39:00,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=207 2024-11-13T16:39:00,908 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-13T16:39:00,911 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:00,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] procedure2.ProcedureExecutor(1139): Stored pid=213, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:00,913 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=213, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:00,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] 
access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:00,916 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=213, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:00,927 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38437 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:00,939 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/9639880e5004ddd3885ba2a5ed6a3767 2024-11-13T16:39:00,939 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e7556eebd23cb4a2700bea3af60234b 2024-11-13T16:39:00,970 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e7556eebd23cb4a2700bea3af60234b/cf, FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e7556eebd23cb4a2700bea3af60234b/recovered.edits] 2024-11-13T16:39:00,970 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/9639880e5004ddd3885ba2a5ed6a3767/cf, FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/9639880e5004ddd3885ba2a5ed6a3767/recovered.edits] 2024-11-13T16:39:00,982 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e7556eebd23cb4a2700bea3af60234b/cf/da93b3c0b1364bf49b743387a738d799 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e7556eebd23cb4a2700bea3af60234b/cf/da93b3c0b1364bf49b743387a738d799 2024-11-13T16:39:00,992 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/9639880e5004ddd3885ba2a5ed6a3767/cf/f13c4a22ad1a414baad11ebb526b04e3 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/9639880e5004ddd3885ba2a5ed6a3767/cf/f13c4a22ad1a414baad11ebb526b04e3 2024-11-13T16:39:00,994 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e7556eebd23cb4a2700bea3af60234b/recovered.edits/9.seqid to 
hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e7556eebd23cb4a2700bea3af60234b/recovered.edits/9.seqid 2024-11-13T16:39:00,998 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e7556eebd23cb4a2700bea3af60234b 2024-11-13T16:39:01,000 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/9639880e5004ddd3885ba2a5ed6a3767/recovered.edits/9.seqid to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/9639880e5004ddd3885ba2a5ed6a3767/recovered.edits/9.seqid 2024-11-13T16:39:01,001 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testtb-testExportFileSystemStateWithSkipTmp/9639880e5004ddd3885ba2a5ed6a3767 2024-11-13T16:39:01,002 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-11-13T16:39:01,006 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-11-13T16:39:01,008 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf] 2024-11-13T16:39:01,012 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241113706dab08c00a48e3818e001033a8c894_9639880e5004ddd3885ba2a5ed6a3767 to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241113706dab08c00a48e3818e001033a8c894_9639880e5004ddd3885ba2a5ed6a3767 2024-11-13T16:39:01,014 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e2024111322d4e523b82b49d8bce01cdebf45671b_4e7556eebd23cb4a2700bea3af60234b to hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e2024111322d4e523b82b49d8bce01cdebf45671b_4e7556eebd23cb4a2700bea3af60234b 2024-11-13T16:39:01,014 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 
2024-11-13T16:39:01,027 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=213, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:01,063 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-11-13T16:39:01,069 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-11-13T16:39:01,075 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=213, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:01,075 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 2024-11-13T16:39:01,075 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731515941075"}]},"ts":"9223372036854775807"} 2024-11-13T16:39:01,076 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731515941075"}]},"ts":"9223372036854775807"} 2024-11-13T16:39:01,079 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-13T16:39:01,079 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 4e7556eebd23cb4a2700bea3af60234b, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1731515910179.4e7556eebd23cb4a2700bea3af60234b.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 9639880e5004ddd3885ba2a5ed6a3767, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767.', STARTKEY => '1', ENDKEY => ''}] 2024-11-13T16:39:01,079 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 
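By this point DeleteTableProcedure pid=213 has archived the region and mob files, deleted the two region rows from hbase:meta, and marked the table as deleted; the entries that follow record the /hbase/acl znode cleanup and the client deleting the two snapshots. A hedged sketch of the equivalent client-side teardown, again assuming an open Connection named conn; the table and snapshot names are copied from the log, the surrounding code is illustrative.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public final class DropTableSketch {
  static void dropTableAndSnapshots(Connection conn) throws IOException {
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Admin admin = conn.getAdmin()) {
      // The table must already be disabled, as the preceding DisableTableProcedure ensured.
      admin.deleteTable(tn);
      // Matches the two "delete name: ..." snapshot requests recorded a few entries below.
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithSkipTmp");
    }
  }
}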
2024-11-13T16:39:01,080 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731515941079"}]},"ts":"9223372036854775807"} 2024-11-13T16:39:01,083 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-11-13T16:39:01,090 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=213, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:01,091 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=213, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 179 msec 2024-11-13T16:39:01,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:01,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:01,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:01,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:01,582 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-13T16:39:01,582 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-13T16:39:01,582 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-13T16:39:01,582 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-13T16:39:01,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:01,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:01,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:01,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:39:01,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:39:01,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:39:01,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:01,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-13T16:39:01,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-11-13T16:39:01,680 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:39:01,680 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:39:01,680 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:39:01,680 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:01,681 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-13T16:39:01,681 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-13T16:39:01,686 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-13T16:39:01,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(381): Deleting snapshot: 
emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:01,689 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-13T16:39:01,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:01,712 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=805 (was 792) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-315877880_22 at /127.0.0.1:47290 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-7707 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) 
java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41289 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-315877880_22 at /127.0.0.1:35500 [Waiting for operation #8] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-315877880_22 at /127.0.0.1:48966 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44519 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 110751) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (565235355) connection to localhost/127.0.0.1:44519 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1662084447_1 at /127.0.0.1:48948 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1662084447_1 at /127.0.0.1:47278 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=809 (was 806) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1442 (was 1549), ProcessCount=14 (was 17), AvailableMemoryMB=2883 (was 2758) - AvailableMemoryMB LEAK? - 2024-11-13T16:39:01,712 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-11-13T16:39:01,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 2024-11-13T16:39:01,721 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3747195e{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-13T16:39:01,724 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6aa7bca0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T16:39:01,724 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T16:39:01,725 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b9940e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-13T16:39:01,725 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24dfe4c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop.log.dir/,STOPPED} 2024-11-13T16:39:01,737 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1731515545832_0010_01_000001 is : 143 2024-11-13T16:39:01,754 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_666607761/yarn-5204613909/MiniMRCluster_666607761-localDir-nm-1_1/usercache/jenkins/appcache/application_1731515545832_0010/container_1731515545832_0010_01_000001/sysfs] 2024-11-13T16:39:05,910 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
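The "Potentially hanging thread" dump and the ResourceChecker WARN above come from HBase's per-test resource accounting: after the test it compares the live-thread count (805 here) against a 500 threshold and dumps each still-live thread with its stack. Below is a minimal sketch of that kind of check using only JDK APIs; the class name and threshold constant are illustrative and this is not HBase's actual ResourceChecker code.

```java
import java.util.Map;

/** Minimal sketch of a thread-leak check similar in spirit to HBase's ResourceChecker. */
public class ThreadCountCheck {
    private static final int THREAD_LIMIT = 500; // illustrative threshold

    public static void main(String[] args) {
        Map<Thread, StackTraceElement[]> stacks = Thread.getAllStackTraces();
        int live = stacks.size();
        if (live > THREAD_LIMIT) {
            System.out.printf("Thread=%d is superior to %d%n", live, THREAD_LIMIT);
            // Dump every surviving thread with its current stack, as in the log above.
            stacks.forEach((t, frames) -> {
                System.out.println("Potentially hanging thread: " + t.getName());
                for (StackTraceElement frame : frames) {
                    System.out.println("    " + frame);
                }
            });
        }
    }
}
```

In the dump above most survivors are idle pool workers parked in LinkedBlockingQueue.poll/take or SynchronousQueue transfers, which is why they are flagged only as "potentially" hanging rather than as failures.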
2024-11-13T16:39:06,333 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T16:39:07,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-13T16:39:13,215 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T16:39:14,999 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6d9281e5cd6679c218f14227797e5659, had cached 0 bytes from a total of 5490 2024-11-13T16:39:14,999 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 33bde9964e83af90386f782c94c4e709, had cached 0 bytes from a total of 15057 2024-11-13T16:39:18,748 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7729f16{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-13T16:39:18,754 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6f1603c7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T16:39:18,755 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T16:39:18,755 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1011848b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-13T16:39:18,755 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43189f04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop.log.dir/,STOPPED} 2024-11-13T16:39:35,773 ERROR [Thread[Thread-404,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-13T16:39:35,796 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1e339e34{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-13T16:39:35,797 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@251a1092{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T16:39:35,797 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T16:39:35,798 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b9969de{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-13T16:39:35,798 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@4bb896b5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop.log.dir/,STOPPED} 2024-11-13T16:39:35,818 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 2024-11-13T16:39:35,840 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-11-13T16:39:35,840 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-11-13T16:39:35,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741830_1006 (size=988621) 2024-11-13T16:39:35,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741830_1006 (size=988621) 2024-11-13T16:39:35,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741830_1006 (size=988621) 2024-11-13T16:39:35,858 ERROR [Thread[Thread-425,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-13T16:39:35,868 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@37e897fc{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-13T16:39:35,868 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@25f3d501{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T16:39:35,868 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T16:39:35,868 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e876d54{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-13T16:39:35,869 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57fd5d16{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop.log.dir/,STOPPED} 2024-11-13T16:39:35,871 ERROR [Thread[Thread-386,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-13T16:39:35,871 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-11-13T16:39:35,871 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T16:39:35,872 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-13T16:39:35,872 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T16:39:35,872 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:39:35,872 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:39:35,872 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T16:39:35,872 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1431761155, stopped=false 2024-11-13T16:39:35,873 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:39:35,873 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-13T16:39:35,873 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=f8f85b0bb1d8,45809,1731515527071 2024-11-13T16:39:35,874 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T16:39:35,910 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
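The recurring FsDatasetAsyncDiskServiceFixer DEBUG entry is the test utility's reflective workaround from HBASE-27595 failing to find a private threadGroup field that newer Hadoop releases no longer declare, so it logs and gives up. A hedged sketch of that failure mode follows; SomeHadoopService and its field are stand-ins invented for illustration, not Hadoop or HBase code.

```java
import java.lang.reflect.Field;

/** Sketch of a version-dependent reflective field lookup, as hinted at by the
 *  FsDatasetAsyncDiskServiceFixer DEBUG entries above (names are illustrative). */
public class ReflectiveFieldProbe {
    public static void main(String[] args) {
        try {
            // Pretend "threadGroup" exists only on older versions of the target class.
            Field f = SomeHadoopService.class.getDeclaredField("threadGroup");
            f.setAccessible(true);
            System.out.println("Found field: " + f);
        } catch (NoSuchFieldException e) {
            // Newer versions removed or renamed the field, so the fixer just logs and moves on.
            System.out.println("NoSuchFieldException: threadGroup; "
                + "the running Hadoop version no longer declares this internal field.");
        }
    }

    /** Stand-in class with no threadGroup field, so the catch branch is exercised. */
    static class SomeHadoopService {
        private final Object executor = new Object();
    }
}
```

Catching NoSuchFieldException and merely logging keeps such a fixer harmless across Hadoop versions, which matches the DEBUG (rather than ERROR) level seen above.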
2024-11-13T16:39:36,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T16:39:36,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T16:39:36,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:39:36,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:39:36,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T16:39:36,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:39:36,052 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T16:39:36,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T16:39:36,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:39:36,053 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T16:39:36,053 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
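The ZKWatcher and ZKUtil entries show the shutdown signal propagating through ZooKeeper: deleting /hbase/running fires a one-shot NodeDeleted watch on the master and every region server, and each of them immediately re-registers a watch even though the znode is now gone ("Set watcher on znode that does not yet exist"). A minimal sketch of that one-shot-watch, re-register pattern using the plain ZooKeeper client rather than HBase's ZKWatcher; the quorum address below is a placeholder.

```java
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

/** Sketch of ZooKeeper's one-shot watch pattern: each notification must be followed
 *  by a new exists() call to keep watching, even when the znode is already gone. */
public class RunningNodeWatcher implements Watcher {
    private final ZooKeeper zk;
    private final String path = "/hbase/running"; // znode path taken from the log above

    RunningNodeWatcher(ZooKeeper zk) throws KeeperException, InterruptedException {
        this.zk = zk;
        zk.exists(path, this); // registers a watch whether or not the znode currently exists
    }

    @Override
    public void process(WatchedEvent event) {
        if (event.getType() == Event.EventType.NodeDeleted) {
            System.out.println("shutdown signal: " + path + " was deleted");
        }
        try {
            // Watches fire only once; re-register to keep seeing later create/delete events.
            zk.exists(path, this);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        } catch (KeeperException e) {
            // Session is probably closing during shutdown; nothing left to watch.
        }
    }

    public static void main(String[] args) throws Exception {
        // Placeholder quorum address; the test cluster in the log used 127.0.0.1:58220.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });
        new RunningNodeWatcher(zk);
        Thread.sleep(60_000); // keep the process alive long enough to observe events
        zk.close();
    }
}
```

Re-registering on a missing znode is deliberate: it ensures that a later re-creation of /hbase/running would still be observed.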
2024-11-13T16:39:36,053 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T16:39:36,053 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T16:39:36,053 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:39:36,053 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T16:39:36,054 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T16:39:36,054 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'f8f85b0bb1d8,38437,1731515528789' ***** 2024-11-13T16:39:36,054 INFO [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HRegionServer(878): Closing user regions 2024-11-13T16:39:36,054 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop 
coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:39:36,054 INFO [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HRegionServer(3091): Received CLOSE for 6d9281e5cd6679c218f14227797e5659 2024-11-13T16:39:36,054 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T16:39:36,054 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'f8f85b0bb1d8,39619,1731515529105' ***** 2024-11-13T16:39:36,054 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:39:36,054 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T16:39:36,054 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'f8f85b0bb1d8,38523,1731515529292' ***** 2024-11-13T16:39:36,054 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:39:36,054 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T16:39:36,055 INFO [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T16:39:36,055 INFO [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T16:39:36,055 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 6d9281e5cd6679c218f14227797e5659, disabling compactions & flushes 2024-11-13T16:39:36,055 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659. 2024-11-13T16:39:36,055 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659. 2024-11-13T16:39:36,055 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659. after waiting 0 ms 2024-11-13T16:39:36,055 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659. 2024-11-13T16:39:36,055 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T16:39:36,055 INFO [RS:0;f8f85b0bb1d8:38437 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T16:39:36,056 INFO [RS:0;f8f85b0bb1d8:38437 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-13T16:39:36,056 INFO [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HRegionServer(3091): Received CLOSE for 618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:39:36,056 INFO [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HRegionServer(959): stopping server f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:39:36,056 INFO [RS:0;f8f85b0bb1d8:38437 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T16:39:36,056 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 618c30cdb779cfeb9cea6e5be1e51395, disabling compactions & flushes 2024-11-13T16:39:36,056 INFO [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T16:39:36,056 INFO [RS:0;f8f85b0bb1d8:38437 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;f8f85b0bb1d8:38437. 2024-11-13T16:39:36,056 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. 2024-11-13T16:39:36,056 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. 2024-11-13T16:39:36,056 INFO [RS:2;f8f85b0bb1d8:38523 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T16:39:36,056 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. after waiting 0 ms 2024-11-13T16:39:36,056 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T16:39:36,056 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T16:39:36,056 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. 
2024-11-13T16:39:36,056 INFO [RS:2;f8f85b0bb1d8:38523 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T16:39:36,056 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:39:36,056 INFO [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HRegionServer(959): stopping server f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:39:36,056 INFO [RS:2;f8f85b0bb1d8:38523 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T16:39:36,056 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 618c30cdb779cfeb9cea6e5be1e51395 1/1 column families, dataSize=190 B heapSize=672 B 2024-11-13T16:39:36,056 INFO [RS:2;f8f85b0bb1d8:38523 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;f8f85b0bb1d8:38523. 2024-11-13T16:39:36,056 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T16:39:36,056 INFO [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-13T16:39:36,056 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:39:36,056 INFO [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-13T16:39:36,056 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HRegionServer(1325): Online Regions={618c30cdb779cfeb9cea6e5be1e51395=hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395.} 2024-11-13T16:39:36,056 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HRegionServer(1325): Online Regions={6d9281e5cd6679c218f14227797e5659=testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659.} 2024-11-13T16:39:36,057 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T16:39:36,057 INFO [RS:1;f8f85b0bb1d8:39619 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T16:39:36,057 INFO [RS:1;f8f85b0bb1d8:39619 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-13T16:39:36,057 INFO [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HRegionServer(3091): Received CLOSE for 33bde9964e83af90386f782c94c4e709 2024-11-13T16:39:36,057 INFO [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HRegionServer(959): stopping server f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:39:36,057 INFO [RS:1;f8f85b0bb1d8:39619 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T16:39:36,057 INFO [RS:1;f8f85b0bb1d8:39619 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;f8f85b0bb1d8:39619. 2024-11-13T16:39:36,057 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 33bde9964e83af90386f782c94c4e709, disabling compactions & flushes 2024-11-13T16:39:36,057 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T16:39:36,057 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709. 2024-11-13T16:39:36,057 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:39:36,057 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HRegionServer(1351): Waiting on 6d9281e5cd6679c218f14227797e5659 2024-11-13T16:39:36,057 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709. 2024-11-13T16:39:36,057 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709. after waiting 0 ms 2024-11-13T16:39:36,057 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709. 2024-11-13T16:39:36,058 INFO [RS:1;f8f85b0bb1d8:39619 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-13T16:39:36,058 INFO [RS:1;f8f85b0bb1d8:39619 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T16:39:36,058 INFO [RS:1;f8f85b0bb1d8:39619 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T16:39:36,058 INFO [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T16:39:36,058 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HRegionServer(1351): Waiting on 618c30cdb779cfeb9cea6e5be1e51395 2024-11-13T16:39:36,059 INFO [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-13T16:39:36,059 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HRegionServer(1325): Online Regions={33bde9964e83af90386f782c94c4e709=testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709., 1588230740=hbase:meta,,1.1588230740} 2024-11-13T16:39:36,059 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 33bde9964e83af90386f782c94c4e709 2024-11-13T16:39:36,060 DEBUG [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T16:39:36,060 INFO [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T16:39:36,060 DEBUG [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T16:39:36,060 DEBUG [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T16:39:36,060 DEBUG [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T16:39:36,060 INFO [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=2.96 KB heapSize=5.66 KB 2024-11-13T16:39:36,064 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportExpiredSnapshot/6d9281e5cd6679c218f14227797e5659/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-13T16:39:36,064 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:39:36,065 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659. 
2024-11-13T16:39:36,065 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 6d9281e5cd6679c218f14227797e5659: Waiting for close lock at 1731515976054Running coprocessor pre-close hooks at 1731515976055 (+1 ms)Disabling compacts and flushes for region at 1731515976055Disabling writes for close at 1731515976055Writing region close event to WAL at 1731515976060 (+5 ms)Running coprocessor post-close hooks at 1731515976064 (+4 ms)Closed at 1731515976064 2024-11-13T16:39:36,065 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/default/testExportExpiredSnapshot/33bde9964e83af90386f782c94c4e709/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-13T16:39:36,065 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1731515774068.6d9281e5cd6679c218f14227797e5659. 2024-11-13T16:39:36,065 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:39:36,065 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709. 2024-11-13T16:39:36,065 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 33bde9964e83af90386f782c94c4e709: Waiting for close lock at 1731515976057Running coprocessor pre-close hooks at 1731515976057Disabling compacts and flushes for region at 1731515976057Disabling writes for close at 1731515976057Writing region close event to WAL at 1731515976061 (+4 ms)Running coprocessor post-close hooks at 1731515976065 (+4 ms)Closed at 1731515976065 2024-11-13T16:39:36,066 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1731515774068.33bde9964e83af90386f782c94c4e709. 
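The "Region close journal" entries record each close step against raw epoch-millisecond timestamps with "+N ms" deltas. Converting one value from the journal above shows it is the same clock as the log's wall-clock timestamps, just in epoch form:

```java
import java.time.Instant;

/** The close-journal timestamps above are raw epoch milliseconds. */
public class JournalTimestamp {
    public static void main(String[] args) {
        // Value taken from the "Waiting for close lock at ..." step in the journal above.
        System.out.println(Instant.ofEpochMilli(1731515976054L));
        // Prints 2024-11-13T16:39:36.054Z, matching the 16:39:36,05x wall-clock
        // timestamps on the corresponding close entries earlier in the log.
    }
}
```

Read this way, the whole close of 6d9281e5cd6679c218f14227797e5659, from waiting for the close lock to running the post-close hooks, took about 10 ms.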
2024-11-13T16:39:36,066 DEBUG [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/.tmp/info/77fd9af7837e41deaadf25c3da7b85fd is 121, key is testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767./info:/1731515941075/DeleteFamily/seqid=0 2024-11-13T16:39:36,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742404_1580 (size=5985) 2024-11-13T16:39:36,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742404_1580 (size=5985) 2024-11-13T16:39:36,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742404_1580 (size=5985) 2024-11-13T16:39:36,077 INFO [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.05 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/.tmp/info/77fd9af7837e41deaadf25c3da7b85fd 2024-11-13T16:39:36,079 INFO [regionserver/f8f85b0bb1d8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T16:39:36,082 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/acl/618c30cdb779cfeb9cea6e5be1e51395/.tmp/l/f2c5377c73fa4040a1cf725c33147c87 is 68, key is testtb-testExportFileSystemStateWithSkipTmp/l:/1731515940915/DeleteFamily/seqid=0 2024-11-13T16:39:36,088 DEBUG [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/.tmp/ns/15ff787fc2b04568bcc33d1685de5924 is 119, key is testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767./ns:/1731515941027/DeleteFamily/seqid=0 2024-11-13T16:39:36,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742405_1581 (size=5142) 2024-11-13T16:39:36,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742405_1581 (size=5142) 2024-11-13T16:39:36,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742405_1581 (size=5142) 2024-11-13T16:39:36,093 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=190 B at sequenceid=33 (bloomFilter=false), to=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/acl/618c30cdb779cfeb9cea6e5be1e51395/.tmp/l/f2c5377c73fa4040a1cf725c33147c87 2024-11-13T16:39:36,098 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f2c5377c73fa4040a1cf725c33147c87 2024-11-13T16:39:36,099 
DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/acl/618c30cdb779cfeb9cea6e5be1e51395/.tmp/l/f2c5377c73fa4040a1cf725c33147c87 as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/acl/618c30cdb779cfeb9cea6e5be1e51395/l/f2c5377c73fa4040a1cf725c33147c87 2024-11-13T16:39:36,107 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f2c5377c73fa4040a1cf725c33147c87 2024-11-13T16:39:36,107 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/acl/618c30cdb779cfeb9cea6e5be1e51395/l/f2c5377c73fa4040a1cf725c33147c87, entries=2, sequenceid=33, filesize=5.0 K 2024-11-13T16:39:36,108 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~190 B/190, heapSize ~656 B/656, currentSize=0 B/0 for 618c30cdb779cfeb9cea6e5be1e51395 in 52ms, sequenceid=33, compaction requested=false 2024-11-13T16:39:36,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742406_1582 (size=5738) 2024-11-13T16:39:36,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742406_1582 (size=5738) 2024-11-13T16:39:36,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742406_1582 (size=5738) 2024-11-13T16:39:36,114 INFO [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=229 B at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/.tmp/ns/15ff787fc2b04568bcc33d1685de5924 2024-11-13T16:39:36,115 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/acl/618c30cdb779cfeb9cea6e5be1e51395/recovered.edits/36.seqid, newMaxSeqId=36, maxSeqId=27 2024-11-13T16:39:36,116 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:39:36,116 INFO [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. 
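The flush above follows a write-then-commit pattern: the new store file is written under the store's .tmp directory and only afterwards moved into the column-family directory ("Committing ...tmp/l/f2c5... as .../l/f2c5..."), so readers never observe a half-written file. Below is a small sketch of that general pattern against the Hadoop FileSystem API, run against the local filesystem to stay self-contained; paths and file names are placeholders, and this shows the technique in general, not HBase's HRegionFileSystem implementation.

```java
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Sketch of the write-to-.tmp-then-rename commit pattern visible in the flush log above. */
public class TmpThenCommit {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf); // local FS keeps the sketch self-contained

        Path tmp = new Path("target/demo-store/.tmp/f2c5377c-demo");
        Path committed = new Path("target/demo-store/l/f2c5377c-demo");

        // 1. Write the complete file under .tmp, where readers never look.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.write("flushed cells would go here".getBytes(StandardCharsets.UTF_8));
        }

        // 2. Move it into place; per the rename semantics of the underlying FS,
        //    readers see either no file or the whole file, never a partial one.
        fs.mkdirs(committed.getParent());
        if (!fs.rename(tmp, committed)) {
            throw new IllegalStateException("commit failed for " + committed);
        }
        System.out.println("committed " + committed
            + " size=" + fs.getFileStatus(committed).getLen());
    }
}
```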
2024-11-13T16:39:36,116 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 618c30cdb779cfeb9cea6e5be1e51395: Waiting for close lock at 1731515976056Running coprocessor pre-close hooks at 1731515976056Disabling compacts and flushes for region at 1731515976056Disabling writes for close at 1731515976056Obtaining lock to block concurrent updates at 1731515976056Preparing flush snapshotting stores in 618c30cdb779cfeb9cea6e5be1e51395 at 1731515976056Finished memstore snapshotting hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395., syncing WAL and waiting on mvcc, flushsize=dataSize=190, getHeapSize=656, getOffHeapSize=0, getCellsCount=3 at 1731515976057 (+1 ms)Flushing stores of hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. at 1731515976057Flushing 618c30cdb779cfeb9cea6e5be1e51395/l: creating writer at 1731515976058 (+1 ms)Flushing 618c30cdb779cfeb9cea6e5be1e51395/l: appending metadata at 1731515976081 (+23 ms)Flushing 618c30cdb779cfeb9cea6e5be1e51395/l: closing flushed file at 1731515976081Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@586f1281: reopening flushed file at 1731515976098 (+17 ms)Finished flush of dataSize ~190 B/190, heapSize ~656 B/656, currentSize=0 B/0 for 618c30cdb779cfeb9cea6e5be1e51395 in 52ms, sequenceid=33, compaction requested=false at 1731515976108 (+10 ms)Writing region close event to WAL at 1731515976111 (+3 ms)Running coprocessor post-close hooks at 1731515976115 (+4 ms)Closed at 1731515976116 (+1 ms) 2024-11-13T16:39:36,116 DEBUG [RS_CLOSE_REGION-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1731515535953.618c30cdb779cfeb9cea6e5be1e51395. 
2024-11-13T16:39:36,126 DEBUG [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/.tmp/rep_barrier/8822a40482ac4f0d90a16f4fd2567313 is 128, key is testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767./rep_barrier:/1731515941027/DeleteFamily/seqid=0 2024-11-13T16:39:36,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742407_1583 (size=5784) 2024-11-13T16:39:36,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742407_1583 (size=5784) 2024-11-13T16:39:36,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742407_1583 (size=5784) 2024-11-13T16:39:36,134 INFO [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=247 B at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/.tmp/rep_barrier/8822a40482ac4f0d90a16f4fd2567313 2024-11-13T16:39:36,144 DEBUG [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/.tmp/table/92d9c126f3414914af08fed5b577eb64 is 122, key is testtb-testExportFileSystemStateWithSkipTmp,1,1731515910179.9639880e5004ddd3885ba2a5ed6a3767./table:/1731515941027/DeleteFamily/seqid=0 2024-11-13T16:39:36,149 INFO [regionserver/f8f85b0bb1d8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T16:39:36,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742408_1584 (size=5748) 2024-11-13T16:39:36,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742408_1584 (size=5748) 2024-11-13T16:39:36,154 INFO [regionserver/f8f85b0bb1d8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T16:39:36,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742408_1584 (size=5748) 2024-11-13T16:39:36,155 INFO [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=458 B at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/.tmp/table/92d9c126f3414914af08fed5b577eb64 2024-11-13T16:39:36,160 DEBUG [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/.tmp/info/77fd9af7837e41deaadf25c3da7b85fd as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/info/77fd9af7837e41deaadf25c3da7b85fd 2024-11-13T16:39:36,165 INFO [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/info/77fd9af7837e41deaadf25c3da7b85fd, entries=4, sequenceid=206, filesize=5.8 K 2024-11-13T16:39:36,166 DEBUG [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/.tmp/ns/15ff787fc2b04568bcc33d1685de5924 as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/ns/15ff787fc2b04568bcc33d1685de5924 2024-11-13T16:39:36,171 INFO [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/ns/15ff787fc2b04568bcc33d1685de5924, entries=2, sequenceid=206, filesize=5.6 K 2024-11-13T16:39:36,171 DEBUG [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/.tmp/rep_barrier/8822a40482ac4f0d90a16f4fd2567313 as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/rep_barrier/8822a40482ac4f0d90a16f4fd2567313 2024-11-13T16:39:36,176 INFO [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/rep_barrier/8822a40482ac4f0d90a16f4fd2567313, entries=2, sequenceid=206, filesize=5.6 K 2024-11-13T16:39:36,176 DEBUG [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/.tmp/table/92d9c126f3414914af08fed5b577eb64 as hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/table/92d9c126f3414914af08fed5b577eb64 2024-11-13T16:39:36,181 INFO [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/table/92d9c126f3414914af08fed5b577eb64, entries=3, sequenceid=206, filesize=5.6 K 2024-11-13T16:39:36,182 INFO [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~2.96 KB/3031, heapSize ~5.59 KB/5728, currentSize=0 B/0 for 1588230740 in 122ms, sequenceid=206, compaction requested=false 2024-11-13T16:39:36,187 DEBUG [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/data/hbase/meta/1588230740/recovered.edits/209.seqid, newMaxSeqId=209, maxSeqId=1 2024-11-13T16:39:36,187 DEBUG [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:39:36,187 DEBUG [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 
{event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T16:39:36,188 INFO [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T16:39:36,188 DEBUG [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731515976059Running coprocessor pre-close hooks at 1731515976059Disabling compacts and flushes for region at 1731515976060 (+1 ms)Disabling writes for close at 1731515976060Obtaining lock to block concurrent updates at 1731515976060Preparing flush snapshotting stores in 1588230740 at 1731515976060Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=3031, getHeapSize=5728, getOffHeapSize=0, getCellsCount=23 at 1731515976061 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731515976062 (+1 ms)Flushing 1588230740/info: creating writer at 1731515976062Flushing 1588230740/info: appending metadata at 1731515976065 (+3 ms)Flushing 1588230740/info: closing flushed file at 1731515976065Flushing 1588230740/ns: creating writer at 1731515976084 (+19 ms)Flushing 1588230740/ns: appending metadata at 1731515976088 (+4 ms)Flushing 1588230740/ns: closing flushed file at 1731515976088Flushing 1588230740/rep_barrier: creating writer at 1731515976122 (+34 ms)Flushing 1588230740/rep_barrier: appending metadata at 1731515976125 (+3 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1731515976125Flushing 1588230740/table: creating writer at 1731515976139 (+14 ms)Flushing 1588230740/table: appending metadata at 1731515976144 (+5 ms)Flushing 1588230740/table: closing flushed file at 1731515976144Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4999bd0e: reopening flushed file at 1731515976159 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@18ef9538: reopening flushed file at 1731515976165 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@661a9dc1: reopening flushed file at 1731515976171 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@725b2085: reopening flushed file at 1731515976176 (+5 ms)Finished flush of dataSize ~2.96 KB/3031, heapSize ~5.59 KB/5728, currentSize=0 B/0 for 1588230740 in 122ms, sequenceid=206, compaction requested=false at 1731515976182 (+6 ms)Writing region close event to WAL at 1731515976184 (+2 ms)Running coprocessor post-close hooks at 1731515976187 (+3 ms)Closed at 1731515976187 2024-11-13T16:39:36,188 DEBUG [RS_CLOSE_META-regionserver/f8f85b0bb1d8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T16:39:36,258 INFO [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HRegionServer(976): stopping server f8f85b0bb1d8,38523,1731515529292; all regions closed. 2024-11-13T16:39:36,258 INFO [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HRegionServer(976): stopping server f8f85b0bb1d8,38437,1731515528789; all regions closed. 2024-11-13T16:39:36,259 INFO [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HRegionServer(976): stopping server f8f85b0bb1d8,39619,1731515529105; all regions closed. 
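The flush summaries report both rounded and raw sizes ("dataSize ~2.96 KB/3031, heapSize ~5.59 KB/5728"); the rounded figures are simply the byte counts divided by 1024. A one-line check using the numbers from the meta-region flush above:

```java
public class FlushSizeCheck {
    public static void main(String[] args) {
        // Raw byte counts taken from the meta-region flush summary above.
        System.out.printf("dataSize=%.2f KB, heapSize=%.2f KB%n", 3031 / 1024.0, 5728 / 1024.0);
        // Prints dataSize=2.96 KB, heapSize=5.59 KB, matching the log's rounded figures.
    }
}
```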
2024-11-13T16:39:36,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741834_1010 (size=13442) 2024-11-13T16:39:36,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741835_1011 (size=15193) 2024-11-13T16:39:36,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741834_1010 (size=13442) 2024-11-13T16:39:36,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741834_1010 (size=13442) 2024-11-13T16:39:36,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741835_1011 (size=15193) 2024-11-13T16:39:36,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741835_1011 (size=15193) 2024-11-13T16:39:36,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741836_1012 (size=83880) 2024-11-13T16:39:36,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741836_1012 (size=83880) 2024-11-13T16:39:36,270 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/oldWALs 2024-11-13T16:39:36,271 INFO [RS:2;f8f85b0bb1d8:38523 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL f8f85b0bb1d8%2C38523%2C1731515529292:(num 1731515533668) 2024-11-13T16:39:36,271 DEBUG [RS:2;f8f85b0bb1d8:38523 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:39:36,271 INFO [RS:2;f8f85b0bb1d8:38523 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T16:39:36,271 INFO [RS:2;f8f85b0bb1d8:38523 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T16:39:36,271 INFO [RS:2;f8f85b0bb1d8:38523 {}] hbase.ChoreService(370): Chore service for: regionserver/f8f85b0bb1d8:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-13T16:39:36,272 INFO [RS:2;f8f85b0bb1d8:38523 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T16:39:36,272 INFO [RS:2;f8f85b0bb1d8:38523 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T16:39:36,272 INFO [RS:2;f8f85b0bb1d8:38523 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T16:39:36,272 INFO [RS:2;f8f85b0bb1d8:38523 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T16:39:36,272 INFO [regionserver/f8f85b0bb1d8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T16:39:36,272 INFO [RS:2;f8f85b0bb1d8:38523 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38523 2024-11-13T16:39:36,272 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/oldWALs 2024-11-13T16:39:36,272 INFO [RS:0;f8f85b0bb1d8:38437 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL f8f85b0bb1d8%2C38437%2C1731515528789:(num 1731515533615) 2024-11-13T16:39:36,272 DEBUG [RS:0;f8f85b0bb1d8:38437 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:39:36,272 INFO [RS:0;f8f85b0bb1d8:38437 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T16:39:36,272 INFO [RS:0;f8f85b0bb1d8:38437 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T16:39:36,273 INFO [RS:0;f8f85b0bb1d8:38437 {}] hbase.ChoreService(370): Chore service for: regionserver/f8f85b0bb1d8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T16:39:36,273 INFO [RS:0;f8f85b0bb1d8:38437 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T16:39:36,273 INFO [RS:0;f8f85b0bb1d8:38437 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T16:39:36,273 INFO [regionserver/f8f85b0bb1d8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T16:39:36,273 INFO [RS:0;f8f85b0bb1d8:38437 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T16:39:36,273 INFO [RS:0;f8f85b0bb1d8:38437 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T16:39:36,273 INFO [RS:0;f8f85b0bb1d8:38437 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38437 2024-11-13T16:39:36,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741836_1012 (size=83880) 2024-11-13T16:39:36,276 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/oldWALs 2024-11-13T16:39:36,276 INFO [RS:1;f8f85b0bb1d8:39619 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL f8f85b0bb1d8%2C39619%2C1731515529105.meta:.meta(num 1731515534795) 2024-11-13T16:39:36,280 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/WALs/f8f85b0bb1d8,39619,1731515529105/f8f85b0bb1d8%2C39619%2C1731515529105.1731515533637 not finished, retry = 0 2024-11-13T16:39:36,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073741833_1009 (size=9365) 2024-11-13T16:39:36,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073741833_1009 (size=9365) 2024-11-13T16:39:36,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741833_1009 (size=9365) 2024-11-13T16:39:36,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 
2024-11-13T16:39:36,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f8f85b0bb1d8,38523,1731515529292 2024-11-13T16:39:36,306 INFO [RS:2;f8f85b0bb1d8:38523 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T16:39:36,358 INFO [regionserver/f8f85b0bb1d8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-13T16:39:36,358 INFO [regionserver/f8f85b0bb1d8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-13T16:39:36,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f8f85b0bb1d8,38437,1731515528789 2024-11-13T16:39:36,360 INFO [RS:0;f8f85b0bb1d8:38437 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T16:39:36,360 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$356/0x00007f7a4c902fa0@47706a8c rejected from java.util.concurrent.ThreadPoolExecutor@272ec06[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 61] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-13T16:39:36,382 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/oldWALs 2024-11-13T16:39:36,382 INFO [RS:1;f8f85b0bb1d8:39619 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL f8f85b0bb1d8%2C39619%2C1731515529105:(num 1731515533637) 2024-11-13T16:39:36,382 DEBUG [RS:1;f8f85b0bb1d8:39619 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T16:39:36,382 INFO [RS:1;f8f85b0bb1d8:39619 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T16:39:36,382 INFO [RS:1;f8f85b0bb1d8:39619 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T16:39:36,382 INFO [RS:1;f8f85b0bb1d8:39619 {}] hbase.ChoreService(370): Chore service for: regionserver/f8f85b0bb1d8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T16:39:36,383 INFO [RS:1;f8f85b0bb1d8:39619 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T16:39:36,383 INFO [regionserver/f8f85b0bb1d8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T16:39:36,383 INFO [RS:1;f8f85b0bb1d8:39619 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:39619 2024-11-13T16:39:36,398 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f8f85b0bb1d8,38523,1731515529292] 2024-11-13T16:39:36,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/f8f85b0bb1d8,39619,1731515529105 2024-11-13T16:39:36,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T16:39:36,435 INFO [RS:1;f8f85b0bb1d8:39619 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T16:39:36,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T16:39:36,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38523-0x1013637f1800003, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T16:39:36,499 INFO [RS:2;f8f85b0bb1d8:38523 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T16:39:36,499 INFO [RS:2;f8f85b0bb1d8:38523 {}] regionserver.HRegionServer(1031): Exiting; stopping=f8f85b0bb1d8,38523,1731515529292; zookeeper connection closed. 2024-11-13T16:39:36,500 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@75bf6ea3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@75bf6ea3 2024-11-13T16:39:36,521 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f8f85b0bb1d8,38523,1731515529292 already deleted, retry=false 2024-11-13T16:39:36,521 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f8f85b0bb1d8,38523,1731515529292 expired; onlineServers=2 2024-11-13T16:39:36,521 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f8f85b0bb1d8,38437,1731515528789] 2024-11-13T16:39:36,535 INFO [RS:0;f8f85b0bb1d8:38437 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T16:39:36,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T16:39:36,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38437-0x1013637f1800001, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T16:39:36,535 INFO [RS:0;f8f85b0bb1d8:38437 {}] regionserver.HRegionServer(1031): Exiting; stopping=f8f85b0bb1d8,38437,1731515528789; zookeeper connection closed. 
2024-11-13T16:39:36,535 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@9db3a86 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@9db3a86 2024-11-13T16:39:36,596 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f8f85b0bb1d8,38437,1731515528789 already deleted, retry=false 2024-11-13T16:39:36,596 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f8f85b0bb1d8,38437,1731515528789 expired; onlineServers=1 2024-11-13T16:39:36,597 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [f8f85b0bb1d8,39619,1731515529105] 2024-11-13T16:39:36,631 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/f8f85b0bb1d8,39619,1731515529105 already deleted, retry=false 2024-11-13T16:39:36,631 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; f8f85b0bb1d8,39619,1731515529105 expired; onlineServers=0 2024-11-13T16:39:36,631 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'f8f85b0bb1d8,45809,1731515527071' ***** 2024-11-13T16:39:36,631 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T16:39:36,632 INFO [M:0;f8f85b0bb1d8:45809 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T16:39:36,632 INFO [M:0;f8f85b0bb1d8:45809 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T16:39:36,632 DEBUG [M:0;f8f85b0bb1d8:45809 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T16:39:36,632 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-13T16:39:36,632 DEBUG [M:0;f8f85b0bb1d8:45809 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T16:39:36,632 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster-HFileCleaner.large.0-1731515532746 {}] cleaner.HFileCleaner(306): Exit Thread[master/f8f85b0bb1d8:0:becomeActiveMaster-HFileCleaner.large.0-1731515532746,5,FailOnTimeoutGroup] 2024-11-13T16:39:36,632 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster-HFileCleaner.small.0-1731515532769 {}] cleaner.HFileCleaner(306): Exit Thread[master/f8f85b0bb1d8:0:becomeActiveMaster-HFileCleaner.small.0-1731515532769,5,FailOnTimeoutGroup] 2024-11-13T16:39:36,632 INFO [M:0;f8f85b0bb1d8:45809 {}] hbase.ChoreService(370): Chore service for: master/f8f85b0bb1d8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T16:39:36,632 INFO [M:0;f8f85b0bb1d8:45809 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T16:39:36,632 DEBUG [M:0;f8f85b0bb1d8:45809 {}] master.HMaster(1795): Stopping service threads 2024-11-13T16:39:36,632 INFO [M:0;f8f85b0bb1d8:45809 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T16:39:36,632 INFO [M:0;f8f85b0bb1d8:45809 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T16:39:36,633 INFO [M:0;f8f85b0bb1d8:45809 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T16:39:36,633 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-13T16:39:36,652 INFO [RS:1;f8f85b0bb1d8:39619 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T16:39:36,652 INFO [RS:1;f8f85b0bb1d8:39619 {}] regionserver.HRegionServer(1031): Exiting; stopping=f8f85b0bb1d8,39619,1731515529105; zookeeper connection closed. 2024-11-13T16:39:36,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T16:39:36,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39619-0x1013637f1800002, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T16:39:36,652 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3bc7b117 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3bc7b117 2024-11-13T16:39:36,653 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-13T16:39:36,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T16:39:36,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T16:39:36,670 DEBUG [M:0;f8f85b0bb1d8:45809 {}] zookeeper.ZKUtil(347): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T16:39:36,670 WARN [M:0;f8f85b0bb1d8:45809 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T16:39:36,671 INFO [M:0;f8f85b0bb1d8:45809 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/.lastflushedseqids 2024-11-13T16:39:36,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43257 is added to blk_1073742409_1585 (size=316) 2024-11-13T16:39:36,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33001 is added to blk_1073742409_1585 (size=316) 2024-11-13T16:39:36,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742409_1585 (size=316) 2024-11-13T16:39:36,684 INFO [M:0;f8f85b0bb1d8:45809 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T16:39:36,684 INFO [M:0;f8f85b0bb1d8:45809 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T16:39:36,684 DEBUG [M:0;f8f85b0bb1d8:45809 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T16:39:36,697 INFO [M:0;f8f85b0bb1d8:45809 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T16:39:36,697 DEBUG [M:0;f8f85b0bb1d8:45809 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T16:39:36,697 DEBUG [M:0;f8f85b0bb1d8:45809 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T16:39:36,697 DEBUG [M:0;f8f85b0bb1d8:45809 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T16:39:36,697 INFO [M:0;f8f85b0bb1d8:45809 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=840.13 KB heapSize=1008.89 KB 2024-11-13T16:39:36,698 ERROR [AsyncFSWAL-0-hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/MasterData-prefix:f8f85b0bb1d8,45809,1731515527071 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/MasterData-prefix:f8f85b0bb1d8,45809,1731515527071,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T16:39:37,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:39:37,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T16:39:37,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T16:39:37,714 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-11-13T16:39:37,714 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-11-13T16:39:37,714 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:39:37,715 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-13T16:39:37,715 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-13T16:39:41,583 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-13T16:40:05,910 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T16:40:35,911 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;f8f85b0bb1d8:45809 225 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 3 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@25e05b67 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 18 Waited count: 20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26fb8ba7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5193 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 53 Waiting on java.util.concurrent.CountDownLatch$Sync@420b1d9b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10105 Waited count: 10840 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 11 Waited count: 12 Waiting on java.lang.ref.ReferenceQueue$Lock@52981ddf Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@28c41298 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1bafbe15): State: TIMED_WAITING Blocked count: 0 Waited count: 1032 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp512792038-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp512792038-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp512792038-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp512792038-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp512792038-41-acceptor-0@2892a159-ServerConnector@350e1e3e{HTTP/1.1, (http/1.1)}{localhost:42775}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp512792038-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp512792038-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp512792038-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-7d9a9fe9-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 21 Waited count: 3145 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b59d0e2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37215): State: TIMED_WAITING Blocked count: 1 Waited 
count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@4c88cf4d): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 172 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@187a8ab0): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 172 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 48721 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1337 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dad7c11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37215): State: TIMED_WAITING Blocked count: 97 Waited count: 2396 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37215): State: TIMED_WAITING Blocked count: 96 Waited count: 2412 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37215): State: TIMED_WAITING Blocked count: 83 Waited count: 2410 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37215): State: TIMED_WAITING Blocked count: 94 Waited count: 2412 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37215): State: TIMED_WAITING Blocked count: 72 Waited count: 2403 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6e425f80): State: TIMED_WAITING Blocked count: 0 Waited count: 257 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@270b1256): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@200ad9a4): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@2b2de817): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(568031931)): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp1511600402-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1511600402-87-acceptor-0@2c7712cc-ServerConnector@117de549{HTTP/1.1, (http/1.1)}{localhost:34985}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1511600402-88): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1511600402-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-646a8aa-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4a7c9e32): State: TIMED_WAITING Blocked count: 0 Waited count: 1026 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 36869): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 0 Waited count: 305 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17e09b7b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1731676653-172.17.0.3-1731515519385 heartbeating to localhost/127.0.0.1:37215): State: TIMED_WAITING Blocked count: 1369 Waited count: 1577 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@beb4f39): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 36869): State: TIMED_WAITING Blocked count: 0 Waited count: 515 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 36869): State: TIMED_WAITING Blocked count: 0 Waited count: 514 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 36869): State: TIMED_WAITING Blocked count: 0 Waited count: 516 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 3 on default port 36869): State: TIMED_WAITING Blocked count: 0 Waited count: 514 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 109 (IPC Server handler 4 on default port 36869): State: TIMED_WAITING Blocked count: 0 Waited count: 515 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Client (565235355) connection to localhost/127.0.0.1:37215 from jenkins): State: TIMED_WAITING Blocked count: 1383 Waited count: 1383 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 107 (IPC Parameter Sending Thread for localhost/127.0.0.1:37215): State: TIMED_WAITING Blocked count: 0 Waited count: 1876 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp320088676-121): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 
(qtp320088676-122-acceptor-0@7cd90a5b-ServerConnector@2ec4654c{HTTP/1.1, (http/1.1)}{localhost:36973}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp320088676-123): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp320088676-124): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-1a8f6ff6-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@36ca15be): State: TIMED_WAITING Blocked count: 0 Waited count: 1025 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 42231): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 301 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@622ee6df Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1731676653-172.17.0.3-1731515519385 heartbeating to localhost/127.0.0.1:37215): State: TIMED_WAITING Blocked count: 1365 Waited count: 1565 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5842990f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 42231): State: TIMED_WAITING Blocked count: 0 Waited count: 515 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 42231): State: TIMED_WAITING Blocked count: 0 Waited count: 528 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 42231): State: TIMED_WAITING Blocked count: 0 Waited count: 514 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 42231): State: TIMED_WAITING Blocked count: 0 Waited count: 536 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 42231): State: TIMED_WAITING Blocked count: 0 Waited count: 518 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp350913070-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp350913070-154-acceptor-0@840696b-ServerConnector@35c67e1f{HTTP/1.1, (http/1.1)}{localhost:33471}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp350913070-155): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp350913070-156): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-7a433b12-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@260d1363): State: TIMED_WAITING Blocked count: 0 Waited count: 1025 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 42229): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 253 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@36104b0b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1731676653-172.17.0.3-1731515519385 heartbeating to localhost/127.0.0.1:37215): State: TIMED_WAITING Blocked count: 1375 Waited count: 1565 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4bbc55d9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 42229): State: TIMED_WAITING Blocked count: 0 Waited count: 514 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 42229): State: TIMED_WAITING Blocked count: 0 Waited count: 514 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 42229): State: TIMED_WAITING Blocked count: 0 Waited count: 517 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 42229): State: TIMED_WAITING Blocked count: 0 Waited count: 513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 42229): State: TIMED_WAITING Blocked count: 0 Waited count: 514 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data5)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 189 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 193 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 204 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data1/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data5/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data2/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 206 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data3/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data4/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data6/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (java.util.concurrent.ThreadPoolExecutor$Worker@7d68d46[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (java.util.concurrent.ThreadPoolExecutor$Worker@25df3c31[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@3e184add[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 242 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:58220): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 239 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 243 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 256 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 244 (SyncThread:0): State: WAITING Blocked count: 3 Waited count: 426 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@621c57d7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 245 (ProcessThread(sid:0 cport:58220):): State: WAITING Blocked count: 4 Waited count: 524 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@572eb90b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 246 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 548 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d8b9ddc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 247 (NIOWorkerThread-1): State: WAITING Blocked count: 0 Waited count: 92 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 249 (LeaseRenewer:jenkins@localhost:37215): State: TIMED_WAITING Blocked count: 14 Waited count: 531 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@6c6d3c70 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 448 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 50 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (Time-limited test-SendThread(127.0.0.1:58220)): State: RUNNABLE Blocked count: 59 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 262 (Time-limited test-EventThread): State: WAITING Blocked count: 6 Waited count: 56 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f1d3773 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 263 (NIOWorkerThread-2): State: WAITING Blocked count: 1 Waited count: 92 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 92 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-4): State: WAITING Blocked count: 1 Waited count: 92 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (zk-event-processor-pool-0): State: WAITING Blocked count: 16 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@82379a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 91 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 91 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-8): State: WAITING Blocked count: 3 Waited count: 91 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 91 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 91 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 91 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 91 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-13): State: WAITING Blocked count: 0 Waited count: 91 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 91 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 
(NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 90 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 91 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@d285a6d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809): State: WAITING Blocked count: 97 Waited count: 433 Waiting on java.util.concurrent.Semaphore$NonfairSync@a36dc41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809): State: WAITING Blocked count: 115 Waited count: 507 Waiting on java.util.concurrent.Semaphore$NonfairSync@7c66d4db Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45809): State: WAITING Blocked count: 48 Waited count: 12251 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6f1b74a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45809): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20afb01f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 
(RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20afb01f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1cc7b49f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@56cd6bd0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@62c7b3c6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 
(RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@3fb5c8c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 292 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1e77f0ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 293 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 62 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (M:0;f8f85b0bb1d8:45809): State: TIMED_WAITING Blocked count: 12 Waited count: 4450 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1076/0x00007f7a4cf8db50.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 354 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/f8f85b0bb1d8:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/f8f85b0bb1d8:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (org.apache.hadoop.hdfs.PeerCache@388055ab): State: TIMED_WAITING Blocked count: 0 Waited count: 169 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5032 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 395 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 60 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 68 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 146 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4cec442e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 50272 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 74 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 47 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 453 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@257200d3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 476 (regionserver/f8f85b0bb1d8:0.procedureResultReporter): State: WAITING Blocked count: 10 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21047789 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 474 (regionserver/f8f85b0bb1d8:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fe79ab3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/f8f85b0bb1d8:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38adb161 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 514 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 526 (region-location-0): State: WAITING Blocked count: 9 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 530 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 49940 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 536 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 558 (ForkJoinPool.commonPool-worker-1): State: WAITING Blocked count: 0 Waited count: 575 Waiting on java.util.concurrent.ForkJoinPool@fd6dc34 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 583 (region-location-1): State: WAITING Blocked count: 8 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 584 (region-location-2): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 983 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 552 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1049 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 16 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1095 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 68 Waited count: 106 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6924652d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1097 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1300 (ForkJoinPool.commonPool-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 498 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1510 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@676f794 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1676 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 355 Waiting on java.util.concurrent.ForkJoinPool@fd6dc34 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1741 (region-location-3): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1742 (region-location-4): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1977 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1987 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1988 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5047 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5048 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5049 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9054 (AsyncFSWAL-1-hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/MasterData-prefix:f8f85b0bb1d8,45809,1731515527071): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3f78bc12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9057 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-11-13T16:41:05,911 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details.
2024-11-13T16:41:35,911 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details.
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;f8f85b0bb1d8:45809 224 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 3 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@25e05b67 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 18 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 33 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26fb8ba7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5793 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 59 Waiting on java.util.concurrent.CountDownLatch$Sync@7600152f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10105 Waited count: 10841 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 11 Waited count: 12 Waiting on java.lang.ref.ReferenceQueue$Lock@52981ddf Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@28c41298 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1bafbe15): State: TIMED_WAITING Blocked count: 0 Waited count: 1152 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp512792038-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp512792038-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp512792038-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp512792038-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp512792038-41-acceptor-0@2892a159-ServerConnector@350e1e3e{HTTP/1.1, (http/1.1)}{localhost:42775}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp512792038-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp512792038-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp512792038-44): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-7d9a9fe9-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 21 Waited count: 3145 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b59d0e2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37215): State: TIMED_WAITING Blocked count: 1 Waited 
count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@4c88cf4d): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 192 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@187a8ab0): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 192 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 54672 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1337 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dad7c11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37215): State: TIMED_WAITING Blocked count: 97 Waited count: 2457 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37215): State: TIMED_WAITING Blocked count: 96 Waited count: 2472 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37215): State: TIMED_WAITING Blocked count: 83 Waited count: 2470 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37215): State: TIMED_WAITING Blocked count: 94 Waited count: 2472 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37215): State: TIMED_WAITING Blocked count: 72 Waited count: 2464 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6e425f80): State: TIMED_WAITING Blocked count: 0 Waited count: 287 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@270b1256): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@200ad9a4): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@2b2de817): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(568031931)): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp1511600402-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1511600402-87-acceptor-0@2c7712cc-ServerConnector@117de549{HTTP/1.1, (http/1.1)}{localhost:34985}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1511600402-88): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1511600402-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-646a8aa-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4a7c9e32): State: TIMED_WAITING Blocked count: 0 Waited count: 1146 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 36869): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 0 Waited count: 325 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17e09b7b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1731676653-172.17.0.3-1731515519385 heartbeating to localhost/127.0.0.1:37215): State: TIMED_WAITING Blocked count: 1389 Waited count: 1617 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@beb4f39): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 36869): State: TIMED_WAITING Blocked count: 0 Waited count: 575 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 36869): State: TIMED_WAITING Blocked count: 0 Waited count: 574 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 36869): State: TIMED_WAITING Blocked count: 0 Waited count: 576 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 3 on default port 36869): State: TIMED_WAITING Blocked count: 0 Waited count: 574 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 109 (IPC Server handler 4 on default port 36869): State: TIMED_WAITING Blocked count: 0 Waited count: 575 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Client (565235355) connection to localhost/127.0.0.1:37215 from jenkins): State: TIMED_WAITING Blocked count: 1443 Waited count: 1443 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 107 (IPC Parameter Sending Thread for localhost/127.0.0.1:37215): State: TIMED_WAITING Blocked count: 0 Waited count: 1936 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp320088676-121): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 
(qtp320088676-122-acceptor-0@7cd90a5b-ServerConnector@2ec4654c{HTTP/1.1, (http/1.1)}{localhost:36973}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp320088676-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp320088676-124): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-1a8f6ff6-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@36ca15be): State: TIMED_WAITING Blocked count: 0 Waited count: 1145 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 42231): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 321 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@622ee6df Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1731676653-172.17.0.3-1731515519385 heartbeating to localhost/127.0.0.1:37215): State: TIMED_WAITING Blocked count: 1385 Waited count: 1605 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5842990f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 42231): State: TIMED_WAITING Blocked count: 0 Waited count: 575 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 42231): State: TIMED_WAITING Blocked count: 0 Waited count: 599 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 42231): State: TIMED_WAITING Blocked count: 0 Waited count: 574 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 42231): State: TIMED_WAITING Blocked count: 0 Waited count: 607 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 42231): State: TIMED_WAITING Blocked count: 0 Waited count: 578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp350913070-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp350913070-154-acceptor-0@840696b-ServerConnector@35c67e1f{HTTP/1.1, (http/1.1)}{localhost:33471}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp350913070-155): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp350913070-156): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-7a433b12-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@260d1363): State: TIMED_WAITING Blocked count: 0 Waited count: 1145 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 42229): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 273 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@36104b0b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1731676653-172.17.0.3-1731515519385 heartbeating to localhost/127.0.0.1:37215): State: TIMED_WAITING Blocked count: 1395 Waited count: 1605 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4bbc55d9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 42229): State: TIMED_WAITING Blocked count: 0 Waited count: 574 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 42229): State: TIMED_WAITING Blocked count: 0 Waited count: 574 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 42229): State: TIMED_WAITING Blocked count: 0 Waited count: 577 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 42229): State: TIMED_WAITING Blocked count: 0 Waited count: 573 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 42229): State: TIMED_WAITING Blocked count: 0 Waited count: 574 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data5)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 189 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 193 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 204 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data1/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data5/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data2/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 206 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data3/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data4/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data6/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (java.util.concurrent.ThreadPoolExecutor$Worker@7d68d46[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (java.util.concurrent.ThreadPoolExecutor$Worker@25df3c31[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@3e184add[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 242 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:58220): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 239 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 243 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 286 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 244 (SyncThread:0): State: WAITING Blocked count: 3 Waited count: 430 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@621c57d7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 245 (ProcessThread(sid:0 cport:58220):): State: WAITING Blocked count: 4 Waited count: 528 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@572eb90b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 246 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 552 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d8b9ddc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 247 (NIOWorkerThread-1): State: WAITING Blocked count: 0 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@6c6d3c70 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 482 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 50 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (Time-limited test-SendThread(127.0.0.1:58220)): State: RUNNABLE Blocked count: 59 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 262 (Time-limited test-EventThread): State: WAITING Blocked count: 6 Waited count: 56 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f1d3773 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 263 (NIOWorkerThread-2): State: WAITING Blocked count: 1 Waited count: 92 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 92 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-4): State: WAITING Blocked count: 1 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (zk-event-processor-pool-0): State: WAITING Blocked count: 16 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@82379a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 92 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 91 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-8): State: WAITING Blocked count: 3 Waited count: 92 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 92 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 91 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 92 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 91 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-13): State: WAITING Blocked count: 0 Waited count: 91 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 92 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 91 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 91 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@d285a6d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809): State: WAITING Blocked count: 97 Waited count: 433 Waiting on java.util.concurrent.Semaphore$NonfairSync@a36dc41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809): State: WAITING Blocked count: 115 Waited count: 507 Waiting on java.util.concurrent.Semaphore$NonfairSync@7c66d4db Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45809): State: WAITING Blocked count: 48 Waited count: 12251 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6f1b74a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45809): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20afb01f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20afb01f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1cc7b49f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@56cd6bd0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@62c7b3c6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@3fb5c8c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 292 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1e77f0ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 293 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 62 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (M:0;f8f85b0bb1d8:45809): State: TIMED_WAITING Blocked count: 12 Waited count: 4450 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1076/0x00007f7a4cf8db50.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 354 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 57 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/f8f85b0bb1d8:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/f8f85b0bb1d8:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (org.apache.hadoop.hdfs.PeerCache@388055ab): State: TIMED_WAITING Blocked count: 0 Waited count: 189 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5632 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 395 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 60 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 68 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 146 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4cec442e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 57 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 56274 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 74 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 47 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 453 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@257200d3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 476 
(regionserver/f8f85b0bb1d8:0.procedureResultReporter): State: WAITING Blocked count: 10 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21047789 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 474 (regionserver/f8f85b0bb1d8:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fe79ab3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/f8f85b0bb1d8:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38adb161 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 514 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 526 (region-location-0): State: WAITING Blocked count: 9 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 530 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 55943 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 536 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 558 (ForkJoinPool.commonPool-worker-1): State: TIMED_WAITING Blocked count: 0 Waited count: 576 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 583 (region-location-1): State: WAITING Blocked count: 8 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 584 (region-location-2): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 983 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 558 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1049 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 16 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1095 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 68 Waited count: 106 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6924652d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1097 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1510 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@676f794 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1676 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 355 Waiting on java.util.concurrent.ForkJoinPool@fd6dc34 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1741 (region-location-3): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1742 (region-location-4): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1977 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1987 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1988 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5047 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5048 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5049 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9054 (AsyncFSWAL-1-hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/MasterData-prefix:f8f85b0bb1d8,45809,1731515527071): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3f78bc12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9057 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 9058 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-11-13T16:42:05,911 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-13T16:42:09,527 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=27, reuseRatio=72.97%
2024-11-13T16:42:09,528 DEBUG [master/f8f85b0bb1d8:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0
2024-11-13T16:42:20,502 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-11-13T16:42:35,912 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;f8f85b0bb1d8:45809 223 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 3 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@25e05b67 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked
count: 18 Waited count: 22 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 36 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26fb8ba7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 30 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 65 Waiting on 
java.util.concurrent.CountDownLatch$Sync@3d192afd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10105 Waited count: 10842 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) 
app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 11 Waited count: 12 Waiting on java.lang.ref.ReferenceQueue$Lock@52981ddf Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@28c41298 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1bafbe15): State: TIMED_WAITING Blocked count: 0 Waited count: 1272 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp512792038-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp512792038-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp512792038-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp512792038-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp512792038-41-acceptor-0@2892a159-ServerConnector@350e1e3e{HTTP/1.1, (http/1.1)}{localhost:42775}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp512792038-42): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp512792038-43): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp512792038-44): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-7d9a9fe9-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 21 Waited count: 3145 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b59d0e2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37215): State: TIMED_WAITING Blocked count: 1 Waited count: 65 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 127 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@4c88cf4d): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 212 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@187a8ab0): State: TIMED_WAITING Blocked count: 0 Waited count: 127 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 212 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 60621 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1337 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dad7c11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37215): State: TIMED_WAITING Blocked count: 97 Waited count: 2517 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37215): State: TIMED_WAITING Blocked count: 96 Waited count: 2533 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37215): State: TIMED_WAITING Blocked count: 83 Waited count: 2531 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37215): State: TIMED_WAITING Blocked count: 94 Waited count: 2533 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37215): State: TIMED_WAITING Blocked count: 72 Waited count: 2524 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6e425f80): State: TIMED_WAITING Blocked count: 0 Waited count: 317 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@270b1256): State: TIMED_WAITING Blocked count: 0 Waited count: 127 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@200ad9a4): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@2b2de817): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(568031931)): State: TIMED_WAITING Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp1511600402-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1511600402-87-acceptor-0@2c7712cc-ServerConnector@117de549{HTTP/1.1, (http/1.1)}{localhost:34985}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1511600402-88): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1511600402-89): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-646a8aa-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4a7c9e32): State: TIMED_WAITING Blocked count: 0 Waited count: 1266 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 36869): State: TIMED_WAITING Blocked count: 1 Waited count: 65 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 127 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 0 Waited count: 345 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17e09b7b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1731676653-172.17.0.3-1731515519385 heartbeating to localhost/127.0.0.1:37215): State: TIMED_WAITING Blocked count: 1409 Waited count: 1657 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@beb4f39): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 36869): State: TIMED_WAITING Blocked count: 0 Waited count: 635 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 36869): State: TIMED_WAITING Blocked count: 0 Waited count: 634 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 36869): State: TIMED_WAITING Blocked count: 0 Waited count: 636 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 3 on default port 36869): State: TIMED_WAITING Blocked count: 0 
Waited count: 634 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 109 (IPC Server handler 4 on default port 36869): State: TIMED_WAITING Blocked count: 0 Waited count: 635 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Client (565235355) connection to localhost/127.0.0.1:37215 from jenkins): State: TIMED_WAITING Blocked count: 1503 Waited count: 1503 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 107 (IPC Parameter Sending Thread for localhost/127.0.0.1:37215): State: TIMED_WAITING Blocked count: 0 Waited count: 1996 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp320088676-121): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp320088676-122-acceptor-0@7cd90a5b-ServerConnector@2ec4654c{HTTP/1.1, (http/1.1)}{localhost:36973}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp320088676-123): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp320088676-124): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-1a8f6ff6-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@36ca15be): State: TIMED_WAITING Blocked count: 0 Waited count: 1265 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 42231): State: TIMED_WAITING Blocked count: 1 Waited count: 65 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 127 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 341 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@622ee6df Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1731676653-172.17.0.3-1731515519385 heartbeating to localhost/127.0.0.1:37215): State: TIMED_WAITING Blocked count: 1405 Waited count: 1645 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5842990f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) 
app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 42231): State: TIMED_WAITING Blocked count: 0 Waited count: 635 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 42231): State: TIMED_WAITING Blocked count: 0 Waited count: 670 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 42231): State: TIMED_WAITING Blocked count: 0 Waited count: 634 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 42231): State: TIMED_WAITING Blocked count: 0 Waited count: 683 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 42231): State: TIMED_WAITING Blocked count: 0 Waited count: 638 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp350913070-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp350913070-154-acceptor-0@840696b-ServerConnector@35c67e1f{HTTP/1.1, (http/1.1)}{localhost:33471}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp350913070-155): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp350913070-156): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-7a433b12-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@260d1363): State: TIMED_WAITING Blocked count: 0 Waited count: 1265 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 42229): State: TIMED_WAITING Blocked count: 1 Waited count: 65 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 127 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 293 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@36104b0b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1731676653-172.17.0.3-1731515519385 heartbeating to localhost/127.0.0.1:37215): State: TIMED_WAITING Blocked count: 1415 Waited count: 1645 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4bbc55d9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 42229): State: TIMED_WAITING 
Blocked count: 0 Waited count: 634 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 42229): State: TIMED_WAITING Blocked count: 0 Waited count: 634 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 42229): State: TIMED_WAITING Blocked count: 0 Waited count: 637 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 42229): State: TIMED_WAITING Blocked count: 0 Waited count: 633 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 42229): State: TIMED_WAITING Blocked count: 0 Waited count: 634 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data5)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 189 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 193 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 204 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data1/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data5/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data2/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING 
Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 206 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data3/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data4/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data6/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d2232de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ff458f4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a4f294e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (java.util.concurrent.ThreadPoolExecutor$Worker@7d68d46[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (java.util.concurrent.ThreadPoolExecutor$Worker@25df3c31[State = -1, empty queue]): 
State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@3e184add[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 242 
(NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:58220): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 239 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 243 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 316 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 244 (SyncThread:0): State: WAITING Blocked count: 3 Waited count: 435 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@621c57d7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 245 (ProcessThread(sid:0 cport:58220):): State: WAITING Blocked count: 4 Waited count: 533 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@572eb90b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 246 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 557 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d8b9ddc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 247 (NIOWorkerThread-1): State: WAITING Blocked count: 0 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@6c6d3c70 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 50 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (Time-limited test-SendThread(127.0.0.1:58220)): State: RUNNABLE Blocked count: 59 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 262 (Time-limited test-EventThread): State: WAITING Blocked count: 6 Waited count: 56 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f1d3773 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 263 (NIOWorkerThread-2): State: WAITING Blocked count: 1 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 93 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-4): State: WAITING Blocked count: 1 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (zk-event-processor-pool-0): State: WAITING Blocked count: 16 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@82379a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 
(NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 92 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-8): State: WAITING Blocked count: 3 Waited count: 92 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 92 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 92 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 92 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 92 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-13): State: WAITING Blocked count: 0 Waited count: 92 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 91 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 92 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@d285a6d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809): State: WAITING Blocked count: 97 Waited count: 433 Waiting on java.util.concurrent.Semaphore$NonfairSync@a36dc41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809): State: WAITING Blocked count: 115 Waited count: 507 Waiting on java.util.concurrent.Semaphore$NonfairSync@7c66d4db Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45809): State: WAITING Blocked count: 48 Waited count: 12251 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6f1b74a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45809): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20afb01f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20afb01f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1cc7b49f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.Semaphore$NonfairSync@56cd6bd0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@62c7b3c6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@3fb5c8c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 292 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1e77f0ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 293 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 62 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (M:0;f8f85b0bb1d8:45809): State: TIMED_WAITING Blocked count: 12 Waited count: 4450 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1076/0x00007f7a4cf8db50.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 354 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 63 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/f8f85b0bb1d8:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/f8f85b0bb1d8:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (org.apache.hadoop.hdfs.PeerCache@388055ab): State: TIMED_WAITING Blocked count: 0 Waited count: 209 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6231 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 395 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 60 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 68 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 146 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4cec442e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 63 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 62277 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 74 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 47 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 453 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@257200d3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 476 (regionserver/f8f85b0bb1d8:0.procedureResultReporter): State: WAITING Blocked count: 10 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21047789 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 474 (regionserver/f8f85b0bb1d8:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fe79ab3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/f8f85b0bb1d8:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38adb161 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 514 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 526 (region-location-0): State: WAITING Blocked count: 9 Waited count: 15 Waiting 
on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 530 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 
(Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 61945 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 536 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 583 (region-location-1): State: WAITING Blocked count: 8 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 584 (region-location-2): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 983 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 564 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1049 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 16 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1095 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 68 Waited count: 106 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6924652d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1097 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1510 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@676f794 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1676 (ForkJoinPool.commonPool-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 356 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1741 (region-location-3): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1742 (region-location-4): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1977 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1987 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1988 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5047 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5048 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5049 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9054 (AsyncFSWAL-1-hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/MasterData-prefix:f8f85b0bb1d8,45809,1731515527071): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3f78bc12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9058 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9063 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-13T16:43:05,912 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T16:43:35,912 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;f8f85b0bb1d8:45809 222 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 3 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@25e05b67 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 18 Waited count: 23 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26fb8ba7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 33 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6992 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 71 Waiting on java.util.concurrent.CountDownLatch$Sync@4f919798 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10105 Waited count: 10843 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) 
app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 11 Waited count: 12 Waiting on java.lang.ref.ReferenceQueue$Lock@52981ddf Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@28c41298 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1bafbe15): State: TIMED_WAITING Blocked count: 0 Waited count: 1392 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp512792038-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 38 (qtp512792038-38):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 39 (qtp512792038-39):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 40 (qtp512792038-40):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 41 (qtp512792038-41-acceptor-0@2892a159-ServerConnector@350e1e3e{HTTP/1.1, (http/1.1)}{localhost:42775}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 42 (qtp512792038-42):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 12
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 43 (qtp512792038-43):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 12
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 44 (qtp512792038-44):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 13
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 45 (Session-HouseKeeper-7d9a9fe9-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 46 (pool-7-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 53 (FSEditLogAsync):
  State: WAITING
  Blocked count: 21
  Waited count: 3145
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b59d0e2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241)
    app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 55 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 56 (IPC Server idle connection scanner for port 37215):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 71
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 58 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 139
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@4c88cf4d):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 62 (DatanodeAdminMonitor-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 232
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@187a8ab0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 139
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 47 (RedundancyMonitor):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 232
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344)
    java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 48 (MarkedDeleteBlockScrubberThread):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 66576
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 51 (Block report processor):
  State: WAITING
  Blocked count: 2
  Waited count: 1337
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dad7c11
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614)
Thread 57 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 54 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 64 (IPC Server handler 0 on default port 37215):
  State: TIMED_WAITING
  Blocked count: 97
  Waited count: 2579
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 65 (IPC Server handler 1 on default port 37215):
  State: TIMED_WAITING
  Blocked count: 96
  Waited count: 2595
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 66 (IPC Server handler 2 on default port 37215):
  State: TIMED_WAITING
  Blocked count: 83
  Waited count: 2592
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 67 (IPC Server handler 3 on default port 37215):
  State: TIMED_WAITING
  Blocked count: 94
  Waited count: 2594
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 68 (IPC Server handler 4 on default port 37215):
  State: TIMED_WAITING
  Blocked count: 72
  Waited count: 2586
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 69 (pool-12-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6e425f80):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 348
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@270b1256):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 139
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@200ad9a4):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@2b2de817):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 4
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 75 (CacheReplicationMonitor(568031931)):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 24
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759)
    app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186)
Thread 85 (pool-18-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 86 (qtp1511600402-86):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 87 (qtp1511600402-87-acceptor-0@2c7712cc-ServerConnector@117de549{HTTP/1.1, (http/1.1)}{localhost:34985}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 88 (qtp1511600402-88):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 12
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 89 (qtp1511600402-89):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 12
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 90 (Session-HouseKeeper-646a8aa-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 91 (nioEventLoopGroup-2-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4a7c9e32):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1386
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 94 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 95 (IPC Server idle connection scanner for port 36869):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 71
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 97 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 139
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 100 (Command processor):
  State: WAITING
  Blocked count: 0
  Waited count: 365
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17e09b7b
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 101 (BP-1731676653-172.17.0.3-1731515519385 heartbeating to localhost/127.0.0.1:37215):
  State: TIMED_WAITING
  Blocked count: 1429
  Waited count: 1697
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 102 (pool-20-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@beb4f39):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 96 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 93 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 103 (IPC Server handler 0 on default port 36869):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 695
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 104 (IPC Server handler 1 on default port 36869):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 694
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 105 (IPC Server handler 2 on default port 36869):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 696
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 108 (IPC Server handler 3 on default port 36869):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 694
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 109 (IPC Server handler 4 on default port 36869):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 695
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 106 (IPC Client (565235355) connection to localhost/127.0.0.1:37215 from jenkins):
  State: TIMED_WAITING
  Blocked count: 1563
  Waited count: 1563
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Thread 107 (IPC Parameter Sending Thread for localhost/127.0.0.1:37215):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2056
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 120 (pool-26-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 121 (qtp320088676-121):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 122 (qtp320088676-122-acceptor-0@7cd90a5b-ServerConnector@2ec4654c{HTTP/1.1, (http/1.1)}{localhost:36973}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 123 (qtp320088676-123):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 12
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 124 (qtp320088676-124):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 12
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 125 (Session-HouseKeeper-1a8f6ff6-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 126 (nioEventLoopGroup-4-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@36ca15be):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1385
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 129 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 130 (IPC Server idle connection scanner for port 42231):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 71
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 132 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 139
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 135 (Command processor):
  State: WAITING
  Blocked count: 1
  Waited count: 361
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@622ee6df
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 136 (BP-1731676653-172.17.0.3-1731515519385 heartbeating to localhost/127.0.0.1:37215):
  State: TIMED_WAITING
  Blocked count: 1425
  Waited count: 1685
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 137 (pool-29-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5842990f):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 131 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 128 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 138 (IPC Server handler 0 on default port 42231):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 695
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 139 (IPC Server handler 1 on default port 42231):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 740
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 140 (IPC Server handler 2 on default port 42231):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 694
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 141 (IPC Server handler 3 on default port 42231):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 760
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 142 (IPC Server handler 4 on default port 42231):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 698
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 152 (pool-36-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 153 (qtp350913070-153):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 154 (qtp350913070-154-acceptor-0@840696b-ServerConnector@35c67e1f{HTTP/1.1, (http/1.1)}{localhost:33471}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp350913070-155): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp350913070-156): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-7a433b12-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@260d1363): State: TIMED_WAITING Blocked count: 0 Waited count: 1385 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 42229): State: TIMED_WAITING Blocked count: 1 Waited count: 71 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 139 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 313 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@36104b0b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1731676653-172.17.0.3-1731515519385 heartbeating to localhost/127.0.0.1:37215): State: TIMED_WAITING Blocked count: 1435 Waited count: 1685 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4bbc55d9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 42229): State: TIMED_WAITING 
Blocked count: 0 Waited count: 694 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 42229): State: TIMED_WAITING Blocked count: 0 Waited count: 694 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 42229): State: TIMED_WAITING Blocked count: 0 Waited count: 697 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 42229): State: TIMED_WAITING Blocked count: 0 Waited count: 693 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 42229): State: TIMED_WAITING Blocked count: 0 Waited count: 694 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data5)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 189 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 193 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 204 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data1/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data5/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data2/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING 
Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 206 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data3/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data4/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data6/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d2232de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ff458f4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a4f294e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (java.util.concurrent.ThreadPoolExecutor$Worker@7d68d46[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (java.util.concurrent.ThreadPoolExecutor$Worker@25df3c31[State = -1, empty queue]): 
State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@3e184add[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 242 
(NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:58220): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 239 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 70 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 243 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 346 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 244 (SyncThread:0): State: WAITING Blocked count: 3 Waited count: 439 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@621c57d7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 245 (ProcessThread(sid:0 cport:58220):): State: WAITING Blocked count: 4 Waited count: 537 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@572eb90b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 246 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 561 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d8b9ddc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 247 (NIOWorkerThread-1): State: WAITING Blocked count: 0 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@6c6d3c70 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 541 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 50 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (Time-limited test-SendThread(127.0.0.1:58220)): State: RUNNABLE Blocked count: 59 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 262 (Time-limited test-EventThread): State: WAITING Blocked count: 6 Waited count: 56 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f1d3773 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 263 (NIOWorkerThread-2): State: WAITING Blocked count: 1 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 93 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-4): State: WAITING Blocked count: 1 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (zk-event-processor-pool-0): State: WAITING Blocked count: 16 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@82379a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 
(NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-8): State: WAITING Blocked count: 3 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 92 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 92 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-13): State: WAITING Blocked count: 0 Waited count: 92 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 92 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@d285a6d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809): State: WAITING Blocked count: 97 Waited count: 433 Waiting on java.util.concurrent.Semaphore$NonfairSync@a36dc41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809): State: WAITING Blocked count: 115 Waited count: 507 Waiting on java.util.concurrent.Semaphore$NonfairSync@7c66d4db Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45809): State: WAITING Blocked count: 48 Waited count: 12251 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6f1b74a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45809): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20afb01f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20afb01f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1cc7b49f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.Semaphore$NonfairSync@56cd6bd0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@62c7b3c6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@3fb5c8c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 292 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1e77f0ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 293 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 62 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (M:0;f8f85b0bb1d8:45809): State: TIMED_WAITING Blocked count: 12 Waited count: 4450 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1076/0x00007f7a4cf8db50.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 354 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 69 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/f8f85b0bb1d8:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/f8f85b0bb1d8:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (org.apache.hadoop.hdfs.PeerCache@388055ab): State: TIMED_WAITING Blocked count: 0 Waited count: 229 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6831 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 395 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 60 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 68 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 146 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4cec442e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 69 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 68278 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 74 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 47 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 453 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@257200d3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 476 (regionserver/f8f85b0bb1d8:0.procedureResultReporter): State: WAITING Blocked count: 10 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21047789 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 474 (regionserver/f8f85b0bb1d8:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fe79ab3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/f8f85b0bb1d8:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38adb161 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 514 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 526 (region-location-0): State: WAITING Blocked count: 9 Waited count: 15 Waiting 
on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 530 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 
(Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 67947 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 536 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 583 (region-location-1): State: WAITING Blocked count: 8 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 584 (region-location-2): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 983 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 570 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1049 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 16 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1095 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 68 Waited count: 106 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6924652d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1097 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1510 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@676f794 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1741 (region-location-3): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1742 
(region-location-4): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1977 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1987 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1988 
(MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5047 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5048 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5049 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9054 (AsyncFSWAL-1-hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/MasterData-prefix:f8f85b0bb1d8,45809,1731515527071): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3f78bc12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9058 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9063 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-13T16:44:05,912 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-13T16:44:35,913 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T16:44:36,699 DEBUG [M:0;f8f85b0bb1d8:45809 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731515976684Disabling compacts and flushes for region at 1731515976684Disabling writes for close at 1731515976697 (+13 ms)Obtaining lock to block concurrent updates at 1731515976697Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731515976697Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=860298, getHeapSize=1033040, getOffHeapSize=0, getCellsCount=2277 at 1731515976698 (+1 ms)Failed flush master:store,,1.1595e783b53d99cd5eef43b6debb2682., putting online again at 1731516276699 (+300001 ms) 2024-11-13T16:44:36,699 WARN [M:0;f8f85b0bb1d8:45809 {}] region.MasterRegion(134): Failed to close region org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3909, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3909, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ~[classes/:?] ... 19 more 2024-11-13T16:44:36,700 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:391) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:247) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:203) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:240) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:162) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T16:44:36,702 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-13T16:44:36,702 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-13T16:44:36,702 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/MasterData/WALs/f8f85b0bb1d8,45809,1731515527071/f8f85b0bb1d8%2C45809%2C1731515527071.1731515530769 2024-11-13T16:44:36,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/MasterData/WALs/f8f85b0bb1d8,45809,1731515527071/f8f85b0bb1d8%2C45809%2C1731515527071.1731515530769 after 1ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
2024-11-13T16:44:36,706 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.InterruptedIOException: Operation cancelled
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-13T16:44:36,706 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/MasterData/WALs/f8f85b0bb1d8,45809,1731515527071/f8f85b0bb1d8%2C45809%2C1731515527071.1731515530769
2024-11-13T16:44:36,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/MasterData/WALs/f8f85b0bb1d8,45809,1731515527071/f8f85b0bb1d8%2C45809%2C1731515527071.1731515530769 after 0ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;f8f85b0bb1d8:45809
223 active threads
Thread 1 (main):
  State: TIMED_WAITING
  Blocked count: 3
  Waited count: 4
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444)
    java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203)
    app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167)
    app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128)
    app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39)
    app//org.junit.rules.RunRules.evaluate(RunRules.java:20)
    app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    app//org.junit.runners.ParentRunner.run(ParentRunner.java:413)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155)
    app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385)
    app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162)
    app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507)
    app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495)
Thread 2 (Reference Handler):
  State: RUNNABLE
  Blocked count: 2
  Waited count: 0
  Stack:
    java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method)
    java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253)
    java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215)
Thread 3 (Finalizer):
  State: WAITING
Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@25e05b67 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 18 Waited count: 24 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 42 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 36 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26fb8ba7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 7592 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 77 Waiting on java.util.concurrent.CountDownLatch$Sync@54c0b2b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10105 Waited count: 10844 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) 
java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 11 Waited count: 12 Waiting on java.lang.ref.ReferenceQueue$Lock@52981ddf Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@28c41298 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1bafbe15): State: TIMED_WAITING Blocked count: 0 Waited count: 1512 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 152 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp512792038-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp512792038-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp512792038-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp512792038-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp512792038-41-acceptor-0@2892a159-ServerConnector@350e1e3e{HTTP/1.1, (http/1.1)}{localhost:42775}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp512792038-42): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 
43 (qtp512792038-43): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp512792038-44): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-7d9a9fe9-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 21 Waited count: 3145 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b59d0e2 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37215): State: TIMED_WAITING Blocked count: 1 Waited count: 77 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 151 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@4c88cf4d): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 252 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@187a8ab0): State: TIMED_WAITING Blocked count: 0 Waited count: 151 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 252 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 72528 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1337 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dad7c11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC 
Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37215): State: TIMED_WAITING Blocked count: 97 Waited count: 2640 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37215): State: TIMED_WAITING Blocked count: 96 Waited count: 2656 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37215): State: TIMED_WAITING Blocked count: 83 Waited count: 2653 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37215): State: TIMED_WAITING Blocked count: 94 Waited count: 2655 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37215): State: TIMED_WAITING Blocked count: 72 Waited count: 2647 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6e425f80): State: TIMED_WAITING Blocked count: 0 Waited count: 378 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@270b1256): State: TIMED_WAITING Blocked count: 0 Waited count: 151 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@200ad9a4): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@2b2de817): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(568031931)): State: TIMED_WAITING Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp1511600402-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1511600402-87-acceptor-0@2c7712cc-ServerConnector@117de549{HTTP/1.1, (http/1.1)}{localhost:34985}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1511600402-88): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 
89 (qtp1511600402-89): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-646a8aa-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4a7c9e32): State: TIMED_WAITING Blocked count: 0 Waited count: 1506 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection 
scanner for port 36869): State: TIMED_WAITING Blocked count: 1 Waited count: 77 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 151 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 0 Waited count: 385 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17e09b7b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1731676653-172.17.0.3-1731515519385 heartbeating to localhost/127.0.0.1:37215): State: TIMED_WAITING Blocked count: 1449 Waited count: 1737 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@beb4f39): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 36869): State: TIMED_WAITING Blocked count: 0 Waited count: 755 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 36869): State: TIMED_WAITING Blocked count: 0 Waited count: 754 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 36869): State: TIMED_WAITING Blocked count: 0 Waited count: 756 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 3 on default port 36869): State: TIMED_WAITING Blocked count: 0 Waited count: 754 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 109 (IPC Server handler 4 on default port 36869): State: TIMED_WAITING Blocked count: 0 Waited count: 755 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Client (565235355) connection to localhost/127.0.0.1:37215 from jenkins): State: TIMED_WAITING Blocked count: 1623 Waited count: 1623 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 107 (IPC Parameter Sending Thread for localhost/127.0.0.1:37215): State: TIMED_WAITING Blocked count: 0 Waited count: 2116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp320088676-121): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp320088676-122-acceptor-0@7cd90a5b-ServerConnector@2ec4654c{HTTP/1.1, (http/1.1)}{localhost:36973}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp320088676-123): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp320088676-124): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-1a8f6ff6-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@36ca15be): State: TIMED_WAITING Blocked count: 0 Waited count: 1505 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 42231): State: TIMED_WAITING Blocked count: 1 Waited count: 77 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) 
java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 151 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 381 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@622ee6df Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1731676653-172.17.0.3-1731515519385 heartbeating to localhost/127.0.0.1:37215): State: TIMED_WAITING Blocked count: 1445 Waited count: 1725 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5842990f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 42231): State: TIMED_WAITING Blocked count: 0 Waited count: 755 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 42231): State: TIMED_WAITING Blocked count: 0 Waited count: 812 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 42231): State: TIMED_WAITING Blocked count: 0 Waited count: 754 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 42231): State: TIMED_WAITING Blocked count: 0 Waited count: 838 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 42231): State: TIMED_WAITING Blocked count: 0 Waited count: 758 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp350913070-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f7a4c42d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp350913070-154-acceptor-0@840696b-ServerConnector@35c67e1f{HTTP/1.1, (http/1.1)}{localhost:33471}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp350913070-155): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp350913070-156): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-7a433b12-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@260d1363): State: TIMED_WAITING Blocked count: 0 Waited count: 1505 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 42229): State: TIMED_WAITING Blocked count: 1 Waited count: 77 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 151 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 333 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@36104b0b Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1731676653-172.17.0.3-1731515519385 heartbeating to localhost/127.0.0.1:37215): State: TIMED_WAITING Blocked count: 1455 Waited count: 1725 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4bbc55d9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 42229): State: TIMED_WAITING Blocked count: 0 Waited count: 754 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 42229): State: TIMED_WAITING Blocked count: 0 Waited count: 754 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 42229): State: TIMED_WAITING Blocked count: 0 Waited count: 757 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 42229): State: TIMED_WAITING Blocked count: 0 Waited count: 753 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 42229): State: TIMED_WAITING Blocked count: 0 Waited count: 754 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data5)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 189 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 193 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 204 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data1/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data5/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data2/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 206 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data3/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data4/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data6/current/BP-1731676653-172.17.0.3-1731515519385): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d2232de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ff458f4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a4f294e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (java.util.concurrent.ThreadPoolExecutor$Worker@7d68d46[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (java.util.concurrent.ThreadPoolExecutor$Worker@25df3c31[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (java.util.concurrent.ThreadPoolExecutor$Worker@3e184add[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 242 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:58220): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 239 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 76 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 243 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 376 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 244 (SyncThread:0): State: WAITING Blocked count: 3 Waited count: 444 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@621c57d7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 245 (ProcessThread(sid:0 cport:58220):): State: WAITING Blocked count: 4 Waited count: 542 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@572eb90b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 246 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 566 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d8b9ddc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 247 (NIOWorkerThread-1): State: WAITING Blocked count: 0 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@6c6d3c70 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 569 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 50 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (Time-limited test-SendThread(127.0.0.1:58220)): State: RUNNABLE Blocked count: 59 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 262 (Time-limited test-EventThread): State: WAITING Blocked count: 6 Waited count: 56 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f1d3773 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 263 (NIOWorkerThread-2): State: WAITING Blocked count: 1 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-4): State: WAITING Blocked count: 1 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (zk-event-processor-pool-0): State: WAITING Blocked count: 16 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@82379a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-8): State: WAITING Blocked count: 3 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-13): State: WAITING Blocked count: 0 Waited count: 93 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 
(NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 93 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ccf6cd1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@d285a6d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45809): State: WAITING Blocked count: 97 Waited count: 433 Waiting on java.util.concurrent.Semaphore$NonfairSync@a36dc41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45809): State: WAITING Blocked count: 115 Waited count: 507 Waiting on java.util.concurrent.Semaphore$NonfairSync@7c66d4db Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45809): State: WAITING Blocked count: 48 Waited count: 12251 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6f1b74a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45809): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20afb01f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20afb01f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1cc7b49f Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@56cd6bd0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@62c7b3c6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=45809): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@3fb5c8c9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 292 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1e77f0ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 293 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 62 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (M:0;f8f85b0bb1d8:45809): State: TIMED_WAITING Blocked count: 12 Waited count: 4451 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162) app//org.apache.hadoop.hbase.wal.AbstractWALProvider$$Lambda$1328/0x00007f7a4d218a20.run(Unknown Source) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 354 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 75 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/f8f85b0bb1d8:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/f8f85b0bb1d8:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (org.apache.hadoop.hdfs.PeerCache@388055ab): State: TIMED_WAITING Blocked count: 0 Waited count: 249 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 7431 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 395 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 60 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 68 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 146 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4cec442e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 75 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 74280 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 74 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 
430 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 47 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 453 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@257200d3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 476 (regionserver/f8f85b0bb1d8:0.procedureResultReporter): State: WAITING Blocked count: 10 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@21047789 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 474 (regionserver/f8f85b0bb1d8:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fe79ab3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/f8f85b0bb1d8:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@38adb161 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 514 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 526 (region-location-0): State: WAITING Blocked count: 9 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 530 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 73949 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 536 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 583 (region-location-1): State: WAITING Blocked count: 8 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 584 (region-location-2): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 983 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 576 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1049 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 16 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1095 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 68 Waited count: 106 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6924652d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1097 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1510 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@676f794 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1741 (region-location-3): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1742 (region-location-4):
  State: WAITING
  Blocked count: 2
  Waited count: 7
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@dec5a3f
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1977 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1987 (RPCClient-NioEventLoopGroup-6-6):
  State: RUNNABLE
  Blocked count: 10
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1988 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 5047 (RPCClient-NioEventLoopGroup-6-7):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 5048 (RPCClient-NioEventLoopGroup-6-8):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 5049 (RPCClient-NioEventLoopGroup-6-9):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 9054 (AsyncFSWAL-1-hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/MasterData-prefix:f8f85b0bb1d8,45809,1731515527071):
  State: WAITING
  Blocked count: 0
  Waited count: 2
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3f78bc12
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 9063 (Timer for 'JobHistoryServer' metrics system):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 14
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 9066 (WAL-Shutdown-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doShutdown(AbstractFSWAL.java:2117)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1179)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1174)
    java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 9067 (Close-WAL-Writer-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1318/0x00007f7a4d217d98.run(Unknown Source)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-11-13T16:44:40,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/MasterData/WALs/f8f85b0bb1d8,45809,1731515527071/f8f85b0bb1d8%2C45809%2C1731515527071.1731515530769 after 4000ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-13T16:44:41,700 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds"
2024-11-13T16:44:41,701 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-13T16:44:41,701 INFO [M:0;f8f85b0bb1d8:45809 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-13T16:44:41,701 INFO [M:0;f8f85b0bb1d8:45809 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:45809
2024-11-13T16:44:41,701 INFO [M:0;f8f85b0bb1d8:45809 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-13T16:44:41,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37215/user/jenkins/test-data/1ad5136d-a43c-477e-500d-f317514b13ce/MasterData/WALs/f8f85b0bb1d8,45809,1731515527071/f8f85b0bb1d8%2C45809%2C1731515527071.1731515530769
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 12 more
2024-11-13T16:44:41,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-13T16:44:41,811 INFO [M:0;f8f85b0bb1d8:45809 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-13T16:44:41,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45809-0x1013637f1800000, quorum=127.0.0.1:58220, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-13T16:44:41,815 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1621d99c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-13T16:44:41,819 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@35c67e1f{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-13T16:44:41,819 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-13T16:44:41,819 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@d51af8d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-13T16:44:41,819 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78f1b706{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop.log.dir/,STOPPED}
2024-11-13T16:44:41,823 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-13T16:44:41,823 WARN [BP-1731676653-172.17.0.3-1731515519385 heartbeating to localhost/127.0.0.1:37215 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-13T16:44:41,823 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-13T16:44:41,823 WARN [BP-1731676653-172.17.0.3-1731515519385 heartbeating to localhost/127.0.0.1:37215 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1731676653-172.17.0.3-1731515519385 (Datanode Uuid 1dc0131a-56f5-444b-8ad1-45336a30e1cb) service to localhost/127.0.0.1:37215
2024-11-13T16:44:41,825 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data5/current/BP-1731676653-172.17.0.3-1731515519385 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-13T16:44:41,825 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data6/current/BP-1731676653-172.17.0.3-1731515519385 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-13T16:44:41,826 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-13T16:44:41,830 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@68f857ff{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-13T16:44:41,831 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ec4654c{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-13T16:44:41,831 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-13T16:44:41,831 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39ddae33{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-13T16:44:41,831 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4628bbe8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop.log.dir/,STOPPED}
2024-11-13T16:44:41,833 WARN [BP-1731676653-172.17.0.3-1731515519385 heartbeating to localhost/127.0.0.1:37215 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-13T16:44:41,833 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-13T16:44:41,833 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-13T16:44:41,833 WARN [BP-1731676653-172.17.0.3-1731515519385 heartbeating to localhost/127.0.0.1:37215 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1731676653-172.17.0.3-1731515519385 (Datanode Uuid 01707b01-a2f9-4c88-9327-5f56d6c85f89) service to localhost/127.0.0.1:37215
2024-11-13T16:44:41,834 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data3/current/BP-1731676653-172.17.0.3-1731515519385 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-13T16:44:41,834 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data4/current/BP-1731676653-172.17.0.3-1731515519385 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-13T16:44:41,834 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-13T16:44:41,837 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@75d3cbc1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-13T16:44:41,838 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@117de549{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-13T16:44:41,838 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-13T16:44:41,838 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c56f4f6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-13T16:44:41,838 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@48d007cf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop.log.dir/,STOPPED}
2024-11-13T16:44:41,840 WARN [BP-1731676653-172.17.0.3-1731515519385 heartbeating to localhost/127.0.0.1:37215 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-13T16:44:41,840 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-13T16:44:41,840 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-13T16:44:41,840 WARN [BP-1731676653-172.17.0.3-1731515519385 heartbeating to localhost/127.0.0.1:37215 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1731676653-172.17.0.3-1731515519385 (Datanode Uuid 05a333e2-dd09-41c1-aa27-69e7d73515c0) service to localhost/127.0.0.1:37215
2024-11-13T16:44:41,842 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data2/current/BP-1731676653-172.17.0.3-1731515519385 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-13T16:44:41,842 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/cluster_bc895b26-8a42-a8b9-9756-cd495b8e10c8/data/data1/current/BP-1731676653-172.17.0.3-1731515519385 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-13T16:44:41,842 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-13T16:44:41,850 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f111d3b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-13T16:44:41,851 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@350e1e3e{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-13T16:44:41,851 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-13T16:44:41,851 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2659399f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-13T16:44:41,851 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a95d1d1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d3288b74-856e-02d8-83e7-92be74b2cd2c/hadoop.log.dir/,STOPPED}
2024-11-13T16:44:41,866 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-13T16:44:41,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down